repo_name (string) | path (string) | copies (string) | size (string) | content (string) | license (string) | hash (int64) | line_mean (float64) | line_max (int64) | alpha_frac (float64) | autogenerated (bool) |
---|---|---|---|---|---|---|---|---|---|---|
NewKnowledge/punk | punk/aggregator/aggregateByNumericRange.py | 1 | 2516 | import pandas as pd
import numpy as np
from typing import List, NamedTuple
from .numeric import range_groups
from primitive_interfaces.base import PrimitiveBase
Inputs = pd.DataFrame
Outputs = np.ndarray
Params = dict
CallMetadata = dict
class AggregateByNumericRange(PrimitiveBase[Inputs, Outputs, Params]):
__author__ = 'distil'
__metadata__ = {
"id": "5ab6f38f-d57a-30a7-8919-87d9d02954f6",
"name": "punk.aggregator.aggregateByNumericRange.AggregateByNumericRange",
"common_name": "NumericRangeAggregation",
"description": "Determine the best bins for value counts and perform the aggregation",
"languages": [
"python3.6"
],
"library": "punk",
"version": "1.1.1",
"source_code": "https://github.com/NewKnowledge/punk/blob/dev/punk/aggregator/aggregateByNumericRange.py",
"is_class": True,
"interface_type": "data_cleaning",
"algorithm_type": [
"aggregation"
],
"task_type": [
"data cleaning"
],
"output_type": [
"features"
],
"team": "distil",
"schema_version": 1.0,
"build": [
{
"type": "pip",
"package": "punk"
}
],
"compute_resources": {
"sample_size": [
1000.0,
10.0
],
"sample_unit": [
"MB"
],
"num_nodes": [
1
],
"cores_per_node": [
1
],
"gpus_per_node": [
0
],
"mem_per_node": [
1.0
],
"disk_per_node": [
1.0
],
"mem_per_gpu": [
0.0
],
"expected_running_time": [
5.0
]
}
}
def __init__(self):
pass
def get_params(self) -> Params:
return {}
def set_params(self, params: Params) -> None:
self.params = params
def get_call_metadata(self) -> CallMetadata:
return {}
def fit(self) -> None:
pass
def produce(self, inputs: Inputs, values: List[str] = []) -> Outputs:
return range_groups(inputs, values) | mit | -419,282,312,379,151,500 | 25.776596 | 114 | 0.439189 | false |
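A minimal usage sketch for the primitive above (the DataFrame column and sample values are hypothetical, not taken from this row): construct the primitive and let produce() delegate to range_groups for the binning and aggregation.

import pandas as pd
from punk.aggregator.aggregateByNumericRange import AggregateByNumericRange

df = pd.DataFrame({"age": [21, 34, 45, 52, 67, 18, 29]})  # hypothetical numeric column
primitive = AggregateByNumericRange()
result = primitive.produce(df, values=["age"])  # delegates to range_groups(df, ["age"])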
nevercast/home-assistant | homeassistant/components/device_tracker/ubus.py | 7 | 5647 | """
homeassistant.components.device_tracker.ubus
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Device tracker platform that supports scanning an OpenWrt router for device
presence.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/device_tracker.ubus/
"""
import logging
import json
from datetime import timedelta
import re
import threading
import requests
from homeassistant.const import CONF_HOST, CONF_USERNAME, CONF_PASSWORD
from homeassistant.helpers import validate_config
from homeassistant.util import Throttle
from homeassistant.components.device_tracker import DOMAIN
# Return cached results if last scan was less than this time ago
MIN_TIME_BETWEEN_SCANS = timedelta(seconds=5)
_LOGGER = logging.getLogger(__name__)
def get_scanner(hass, config):
""" Validates config and returns a Luci scanner. """
if not validate_config(config,
{DOMAIN: [CONF_HOST, CONF_USERNAME, CONF_PASSWORD]},
_LOGGER):
return None
scanner = UbusDeviceScanner(config[DOMAIN])
return scanner if scanner.success_init else None
# pylint: disable=too-many-instance-attributes
class UbusDeviceScanner(object):
"""
This class queries a wireless router running OpenWrt firmware
for connected devices. Adapted from Tomato scanner.
    Configure your router's ubus ACL based on the following instructions:
http://wiki.openwrt.org/doc/techref/ubus
Read only access will be fine.
To use this class you have to install rpcd-mod-file package
in your OpenWrt router:
opkg install rpcd-mod-file
"""
def __init__(self, config):
host = config[CONF_HOST]
username, password = config[CONF_USERNAME], config[CONF_PASSWORD]
self.parse_api_pattern = re.compile(r"(?P<param>\w*) = (?P<value>.*);")
self.lock = threading.Lock()
self.last_results = {}
self.url = 'http://{}/ubus'.format(host)
self.session_id = _get_session_id(self.url, username, password)
self.hostapd = []
self.leasefile = None
self.mac2name = None
self.success_init = self.session_id is not None
def scan_devices(self):
"""
Scans for new devices and return a list containing found device ids.
"""
self._update_info()
return self.last_results
def get_device_name(self, device):
""" Returns the name of the given device or None if we don't know. """
with self.lock:
if self.leasefile is None:
result = _req_json_rpc(self.url, self.session_id,
'call', 'uci', 'get',
config="dhcp", type="dnsmasq")
if result:
values = result["values"].values()
self.leasefile = next(iter(values))["leasefile"]
else:
return
if self.mac2name is None:
result = _req_json_rpc(self.url, self.session_id,
'call', 'file', 'read',
path=self.leasefile)
if result:
self.mac2name = dict()
for line in result["data"].splitlines():
hosts = line.split(" ")
self.mac2name[hosts[1].upper()] = hosts[3]
else:
# Error, handled in the _req_json_rpc
return
return self.mac2name.get(device.upper(), None)
@Throttle(MIN_TIME_BETWEEN_SCANS)
def _update_info(self):
"""
        Ensures the information from the router is up to date.
Returns boolean if scanning successful.
"""
if not self.success_init:
return False
with self.lock:
_LOGGER.info("Checking ARP")
if not self.hostapd:
hostapd = _req_json_rpc(self.url, self.session_id,
'list', 'hostapd.*', '')
self.hostapd.extend(hostapd.keys())
self.last_results = []
results = 0
for hostapd in self.hostapd:
result = _req_json_rpc(self.url, self.session_id,
'call', hostapd, 'get_clients')
if result:
results = results + 1
self.last_results.extend(result['clients'].keys())
return bool(results)
def _req_json_rpc(url, session_id, rpcmethod, subsystem, method, **params):
""" Perform one JSON RPC operation. """
data = json.dumps({"jsonrpc": "2.0",
"id": 1,
"method": rpcmethod,
"params": [session_id,
subsystem,
method,
params]})
try:
res = requests.post(url, data=data, timeout=5)
except requests.exceptions.Timeout:
return
if res.status_code == 200:
response = res.json()
if rpcmethod == "call":
return response["result"][1]
else:
return response["result"]
def _get_session_id(url, username, password):
""" Get authentication token for the given host+username+password. """
res = _req_json_rpc(url, "00000000000000000000000000000000", 'call',
'session', 'login', username=username,
password=password)
return res["ubus_rpc_session"]
| mit | 7,077,413,438,336,141,000 | 31.641618 | 79 | 0.551974 | false |
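A rough configuration sketch for the scanner above (host and credentials are placeholders): get_scanner() validates the config, obtains a ubus session id over JSON-RPC, and the resulting scanner can then be polled for connected MAC addresses.

from homeassistant.components.device_tracker.ubus import get_scanner

config = {
    "device_tracker": {
        "host": "192.168.1.1",   # placeholder router address
        "username": "root",      # placeholder credentials
        "password": "secret",
    }
}
scanner = get_scanner(None, config)  # the hass argument is unused by get_scanner
if scanner:
    print(scanner.scan_devices())    # MAC addresses reported by hostapd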
Comunitea/l10n-spain | l10n_es_vat_book/__openerp__.py | 1 | 1560 | # -*- coding: utf-8 -*-
# See README.rst file on addon root folder for license details
{
"name": "Libro de IVA",
"version": "8.0.1.0.1",
"author": "PRAXYA, "
"Odoo Community Association (OCA)",
"website": "http://www.praxya.com",
"license": "AGPL-3",
"category": "Accounting",
"depends": [
'account',
'base_vat',
'l10n_es',
'l10n_es_aeat',
'account_refund_original',
'account_invoice_currency',
],
'data': [
'security/ir_rule.xml',
'security/ir.model.access.csv',
'data/map_taxes_vat_book.xml',
'views/l10n_es_vat_book.xml',
'views/l10n_es_vat_book_issued_lines.xml',
'views/l10n_es_vat_book_received_lines.xml',
'views/l10n_es_vat_book_received_tax_summary.xml',
'views/l10n_es_vat_book_issued_tax_summary.xml',
'views/l10n_es_vat_book_invoice_tax_lines.xml',
'views/l10n_es_vat_book_rectification_issued_lines.xml',
'views/l10n_es_vat_book_rectification_received_lines.xml',
'views/l10n_es_vat_book_rectification_received_tax_summary.xml',
'views/l10n_es_vat_book_rectification_issued_tax_summary.xml',
'report/report_paper_format.xml',
'report/report_views.xml',
'report/vat_book_invoices_issued.xml',
'report/vat_book_invoices_received.xml',
'report/vat_book_rectification_issued_invoices.xml',
'report/vat_book_rectification_received_invoices.xml',
],
"qweb": [
],
"installable": True,
}
| agpl-3.0 | -827,756,064,103,522,700 | 34.454545 | 72 | 0.598077 | false |
open-io/oio-swift | oioswift/common/request_helpers.py | 1 | 1307 | # Copyright (c) 2018 OpenIO SAS
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from swift.common import exceptions, request_helpers
from swift.common.swob import HTTPBadRequest, HTTPForbidden
class OioSegmentedIterable(request_helpers.SegmentedIterable):
"""
SegmentedIterable subclass that does not melt all segment errors
into SegmentError.
"""
def validate_first_segment(self):
try:
return super(OioSegmentedIterable, self).validate_first_segment()
except exceptions.SegmentError as err:
if 'got 403 while retrieving' in err.args[0]:
raise HTTPForbidden(request=self.req)
elif 'got 400 while retrieving' in err.args[0]:
raise HTTPBadRequest(request=self.req)
else:
raise
| apache-2.0 | 2,916,379,550,164,513,000 | 36.342857 | 77 | 0.706963 | false |
jmcfarlane/chula | chula/queue/messages/mail.py | 1 | 2081 | """
Chula email message object
"""
from chula import collection
from chula.mail import Mail
from chula.queue.messages import message
class Message(message.Message):
def fill(self, msg):
super(Message, self).fill(msg)
self.message = Contract()
# Update the contract
if not msg is None:
if not msg['message'] is None:
for key, value in msg['message'].iteritems():
self.message[key] = value
def process(self):
email = Mail(self.message.smtp)
email.from_addy = self.message.from_addy
email.to_addy = self.message.to_addy
email.body = self.message.body
email.subject = self.message.subject
try:
email.send()
return 'Mail Sent'
except:
raise
def validate(self):
for key, value in self.message.iteritems():
if value == collection.UNSET:
msg = 'Required message attribute not specified: %s' % key
raise KeyError(msg)
class Contract(collection.RestrictedCollection):
def __validkeys__(self):
"""
        Valid attribute keys for an email message (required ones default to UNSET)
"""
return ('body',
'from_addy',
'reply_to_addy',
'smtp',
'subject',
'to_addy')
def __defaults__(self):
self.body = collection.UNSET
self.from_addy = collection.UNSET
self.reply_to_addy = None
self.smtp = collection.UNSET
self.subject = collection.UNSET
self.to_addy = collection.UNSET
if __name__ == '__main__':
from chula.queue.messages.mail import Message
from chula.queue.tester import Tester
msg = Message()
msg.message.body = 'Hello world'
msg.message.subject = 'Testing message queue with email message'
msg.message.from_addy = '[email protected]'
msg.message.to_addy = msg.message.from_addy
msg.message.smtp = 'smtp.comcast.net'
tester = Tester()
tester.test(msg)
| gpl-2.0 | 5,380,970,740,859,378,000 | 27.902778 | 74 | 0.583373 | false |
subodhchhabra/airflow | airflow/hooks/hive_hooks.py | 3 | 36631 | # -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import print_function, unicode_literals
import contextlib
import os
import re
import subprocess
import time
from collections import OrderedDict
from tempfile import NamedTemporaryFile
import hmsclient
import six
import unicodecsv as csv
from past.builtins import basestring
from past.builtins import unicode
from six.moves import zip
import airflow.security.utils as utils
from airflow import configuration
from airflow.exceptions import AirflowException
from airflow.hooks.base_hook import BaseHook
from airflow.utils.file import TemporaryDirectory
from airflow.utils.helpers import as_flattened_list
from airflow.utils.operator_helpers import AIRFLOW_VAR_NAME_FORMAT_MAPPING
HIVE_QUEUE_PRIORITIES = ['VERY_HIGH', 'HIGH', 'NORMAL', 'LOW', 'VERY_LOW']
def get_context_from_env_var():
"""
Extract context from env variable, e.g. dag_id, task_id and execution_date,
so that they can be used inside BashOperator and PythonOperator.
:return: The context of interest.
"""
return {format_map['default']: os.environ.get(format_map['env_var_format'], '')
for format_map in AIRFLOW_VAR_NAME_FORMAT_MAPPING.values()}
class HiveCliHook(BaseHook):
"""Simple wrapper around the hive CLI.
It also supports the ``beeline``
a lighter CLI that runs JDBC and is replacing the heavier
traditional CLI. To enable ``beeline``, set the use_beeline param in the
extra field of your connection as in ``{ "use_beeline": true }``
Note that you can also set default hive CLI parameters using the
``hive_cli_params`` to be used in your connection as in
``{"hive_cli_params": "-hiveconf mapred.job.tracker=some.jobtracker:444"}``
Parameters passed here can be overridden by run_cli's hive_conf param
The extra connection parameter ``auth`` gets passed as in the ``jdbc``
connection string as is.
:param mapred_queue: queue used by the Hadoop Scheduler (Capacity or Fair)
:type mapred_queue: string
:param mapred_queue_priority: priority within the job queue.
Possible settings include: VERY_HIGH, HIGH, NORMAL, LOW, VERY_LOW
:type mapred_queue_priority: string
:param mapred_job_name: This name will appear in the jobtracker.
This can make monitoring easier.
:type mapred_job_name: string
"""
def __init__(
self,
hive_cli_conn_id="hive_cli_default",
run_as=None,
mapred_queue=None,
mapred_queue_priority=None,
mapred_job_name=None):
conn = self.get_connection(hive_cli_conn_id)
self.hive_cli_params = conn.extra_dejson.get('hive_cli_params', '')
self.use_beeline = conn.extra_dejson.get('use_beeline', False)
self.auth = conn.extra_dejson.get('auth', 'noSasl')
self.conn = conn
self.run_as = run_as
if mapred_queue_priority:
mapred_queue_priority = mapred_queue_priority.upper()
if mapred_queue_priority not in HIVE_QUEUE_PRIORITIES:
raise AirflowException(
"Invalid Mapred Queue Priority. Valid values are: "
"{}".format(', '.join(HIVE_QUEUE_PRIORITIES)))
self.mapred_queue = mapred_queue or configuration.get('hive',
'default_hive_mapred_queue')
self.mapred_queue_priority = mapred_queue_priority
self.mapred_job_name = mapred_job_name
def _prepare_cli_cmd(self):
"""
This function creates the command list from available information
"""
conn = self.conn
hive_bin = 'hive'
cmd_extra = []
if self.use_beeline:
hive_bin = 'beeline'
jdbc_url = "jdbc:hive2://{conn.host}:{conn.port}/{conn.schema}"
if configuration.conf.get('core', 'security') == 'kerberos':
template = conn.extra_dejson.get(
'principal', "hive/[email protected]")
if "_HOST" in template:
template = utils.replace_hostname_pattern(
utils.get_components(template))
proxy_user = "" # noqa
if conn.extra_dejson.get('proxy_user') == "login" and conn.login:
proxy_user = "hive.server2.proxy.user={0}".format(conn.login)
elif conn.extra_dejson.get('proxy_user') == "owner" and self.run_as:
proxy_user = "hive.server2.proxy.user={0}".format(self.run_as)
jdbc_url += ";principal={template};{proxy_user}"
elif self.auth:
jdbc_url += ";auth=" + self.auth
jdbc_url = jdbc_url.format(**locals())
jdbc_url = '"{}"'.format(jdbc_url)
cmd_extra += ['-u', jdbc_url]
if conn.login:
cmd_extra += ['-n', conn.login]
if conn.password:
cmd_extra += ['-p', conn.password]
hive_params_list = self.hive_cli_params.split()
return [hive_bin] + cmd_extra + hive_params_list
def _prepare_hiveconf(self, d):
"""
This function prepares a list of hiveconf params
from a dictionary of key value pairs.
:param d:
:type d: dict
>>> hh = HiveCliHook()
>>> hive_conf = {"hive.exec.dynamic.partition": "true",
... "hive.exec.dynamic.partition.mode": "nonstrict"}
>>> hh._prepare_hiveconf(hive_conf)
["-hiveconf", "hive.exec.dynamic.partition=true",\
"-hiveconf", "hive.exec.dynamic.partition.mode=nonstrict"]
"""
if not d:
return []
return as_flattened_list(
zip(["-hiveconf"] * len(d),
["{}={}".format(k, v) for k, v in d.items()])
)
def run_cli(self, hql, schema=None, verbose=True, hive_conf=None):
"""
Run an hql statement using the hive cli. If hive_conf is specified
it should be a dict and the entries will be set as key/value pairs
in HiveConf
:param hive_conf: if specified these key value pairs will be passed
to hive as ``-hiveconf "key"="value"``. Note that they will be
passed after the ``hive_cli_params`` and thus will override
whatever values are specified in the database.
:type hive_conf: dict
>>> hh = HiveCliHook()
>>> result = hh.run_cli("USE airflow;")
>>> ("OK" in result)
True
"""
conn = self.conn
schema = schema or conn.schema
if schema:
hql = "USE {schema};\n{hql}".format(**locals())
with TemporaryDirectory(prefix='airflow_hiveop_') as tmp_dir:
with NamedTemporaryFile(dir=tmp_dir) as f:
hql = hql + '\n'
f.write(hql.encode('UTF-8'))
f.flush()
hive_cmd = self._prepare_cli_cmd()
env_context = get_context_from_env_var()
# Only extend the hive_conf if it is defined.
if hive_conf:
env_context.update(hive_conf)
hive_conf_params = self._prepare_hiveconf(env_context)
if self.mapred_queue:
hive_conf_params.extend(
['-hiveconf',
'mapreduce.job.queuename={}'
.format(self.mapred_queue),
'-hiveconf',
'mapred.job.queue.name={}'
.format(self.mapred_queue),
'-hiveconf',
'tez.job.queue.name={}'
.format(self.mapred_queue)
])
if self.mapred_queue_priority:
hive_conf_params.extend(
['-hiveconf',
'mapreduce.job.priority={}'
.format(self.mapred_queue_priority)])
if self.mapred_job_name:
hive_conf_params.extend(
['-hiveconf',
'mapred.job.name={}'
.format(self.mapred_job_name)])
hive_cmd.extend(hive_conf_params)
hive_cmd.extend(['-f', f.name])
if verbose:
self.log.info(" ".join(hive_cmd))
sp = subprocess.Popen(
hive_cmd,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
cwd=tmp_dir,
close_fds=True)
self.sp = sp
stdout = ''
while True:
line = sp.stdout.readline()
if not line:
break
stdout += line.decode('UTF-8')
if verbose:
self.log.info(line.decode('UTF-8').strip())
sp.wait()
if sp.returncode:
raise AirflowException(stdout)
return stdout
def test_hql(self, hql):
"""
Test an hql statement using the hive cli and EXPLAIN
"""
create, insert, other = [], [], []
for query in hql.split(';'): # naive
query_original = query
query = query.lower().strip()
if query.startswith('create table'):
create.append(query_original)
elif query.startswith(('set ',
'add jar ',
'create temporary function')):
other.append(query_original)
elif query.startswith('insert'):
insert.append(query_original)
other = ';'.join(other)
for query_set in [create, insert]:
for query in query_set:
query_preview = ' '.join(query.split())[:50]
self.log.info("Testing HQL [%s (...)]", query_preview)
if query_set == insert:
query = other + '; explain ' + query
else:
query = 'explain ' + query
try:
self.run_cli(query, verbose=False)
except AirflowException as e:
message = e.args[0].split('\n')[-2]
self.log.info(message)
error_loc = re.search('(\d+):(\d+)', message)
if error_loc and error_loc.group(1).isdigit():
lst = int(error_loc.group(1))
begin = max(lst - 2, 0)
end = min(lst + 3, len(query.split('\n')))
context = '\n'.join(query.split('\n')[begin:end])
self.log.info("Context :\n %s", context)
else:
self.log.info("SUCCESS")
def load_df(
self,
df,
table,
field_dict=None,
delimiter=',',
encoding='utf8',
pandas_kwargs=None, **kwargs):
"""
Loads a pandas DataFrame into hive.
Hive data types will be inferred if not passed but column names will
not be sanitized.
:param df: DataFrame to load into a Hive table
:type df: DataFrame
:param table: target Hive table, use dot notation to target a
specific database
:type table: str
:param field_dict: mapping from column name to hive data type.
Note that it must be OrderedDict so as to keep columns' order.
:type field_dict: OrderedDict
:param delimiter: field delimiter in the file
:type delimiter: str
:param encoding: string encoding to use when writing DataFrame to file
:type encoding: str
:param pandas_kwargs: passed to DataFrame.to_csv
:type pandas_kwargs: dict
:param kwargs: passed to self.load_file
"""
def _infer_field_types_from_df(df):
DTYPE_KIND_HIVE_TYPE = {
'b': 'BOOLEAN', # boolean
'i': 'BIGINT', # signed integer
'u': 'BIGINT', # unsigned integer
'f': 'DOUBLE', # floating-point
'c': 'STRING', # complex floating-point
'M': 'TIMESTAMP', # datetime
'O': 'STRING', # object
'S': 'STRING', # (byte-)string
'U': 'STRING', # Unicode
'V': 'STRING' # void
}
d = OrderedDict()
for col, dtype in df.dtypes.iteritems():
d[col] = DTYPE_KIND_HIVE_TYPE[dtype.kind]
return d
if pandas_kwargs is None:
pandas_kwargs = {}
with TemporaryDirectory(prefix='airflow_hiveop_') as tmp_dir:
with NamedTemporaryFile(dir=tmp_dir, mode="w") as f:
if field_dict is None:
field_dict = _infer_field_types_from_df(df)
df.to_csv(path_or_buf=f,
sep=(delimiter.encode(encoding)
if six.PY2 and isinstance(delimiter, unicode)
else delimiter),
header=False,
index=False,
encoding=encoding,
date_format="%Y-%m-%d %H:%M:%S",
**pandas_kwargs)
f.flush()
return self.load_file(filepath=f.name,
table=table,
delimiter=delimiter,
field_dict=field_dict,
**kwargs)
def load_file(
self,
filepath,
table,
delimiter=",",
field_dict=None,
create=True,
overwrite=True,
partition=None,
recreate=False,
tblproperties=None):
"""
Loads a local file into Hive
Note that the table generated in Hive uses ``STORED AS textfile``
which isn't the most efficient serialization format. If a
large amount of data is loaded and/or if the tables gets
queried considerably, you may want to use this operator only to
stage the data into a temporary table before loading it into its
final destination using a ``HiveOperator``.
:param filepath: local filepath of the file to load
:type filepath: str
:param table: target Hive table, use dot notation to target a
specific database
:type table: str
:param delimiter: field delimiter in the file
:type delimiter: str
:param field_dict: A dictionary of the fields name in the file
as keys and their Hive types as values.
Note that it must be OrderedDict so as to keep columns' order.
:type field_dict: OrderedDict
:param create: whether to create the table if it doesn't exist
:type create: bool
:param overwrite: whether to overwrite the data in table or partition
:type overwrite: bool
:param partition: target partition as a dict of partition columns
and values
:type partition: dict
:param recreate: whether to drop and recreate the table at every
execution
:type recreate: bool
:param tblproperties: TBLPROPERTIES of the hive table being created
:type tblproperties: dict
"""
hql = ''
if recreate:
hql += "DROP TABLE IF EXISTS {table};\n"
if create or recreate:
if field_dict is None:
raise ValueError("Must provide a field dict when creating a table")
fields = ",\n ".join(
[k + ' ' + v for k, v in field_dict.items()])
hql += "CREATE TABLE IF NOT EXISTS {table} (\n{fields})\n"
if partition:
pfields = ",\n ".join(
[p + " STRING" for p in partition])
hql += "PARTITIONED BY ({pfields})\n"
hql += "ROW FORMAT DELIMITED\n"
hql += "FIELDS TERMINATED BY '{delimiter}'\n"
hql += "STORED AS textfile\n"
if tblproperties is not None:
tprops = ", ".join(
["'{0}'='{1}'".format(k, v) for k, v in tblproperties.items()])
hql += "TBLPROPERTIES({tprops})\n"
hql += ";"
hql = hql.format(**locals())
self.log.info(hql)
self.run_cli(hql)
hql = "LOAD DATA LOCAL INPATH '{filepath}' "
if overwrite:
hql += "OVERWRITE "
hql += "INTO TABLE {table} "
if partition:
pvals = ", ".join(
["{0}='{1}'".format(k, v) for k, v in partition.items()])
hql += "PARTITION ({pvals});"
# As a workaround for HIVE-10541, add a newline character
# at the end of hql (AIRFLOW-2412).
hql += '\n'
hql = hql.format(**locals())
self.log.info(hql)
self.run_cli(hql)
def kill(self):
if hasattr(self, 'sp'):
if self.sp.poll() is None:
print("Killing the Hive job")
self.sp.terminate()
time.sleep(60)
self.sp.kill()
class HiveMetastoreHook(BaseHook):
""" Wrapper to interact with the Hive Metastore"""
# java short max val
MAX_PART_COUNT = 32767
def __init__(self, metastore_conn_id='metastore_default'):
self.metastore_conn = self.get_connection(metastore_conn_id)
self.metastore = self.get_metastore_client()
def __getstate__(self):
        # This is for pickling to work despite the thrift hive client not
        # being picklable
d = dict(self.__dict__)
del d['metastore']
return d
def __setstate__(self, d):
self.__dict__.update(d)
self.__dict__['metastore'] = self.get_metastore_client()
def get_metastore_client(self):
"""
Returns a Hive thrift client.
"""
from thrift.transport import TSocket, TTransport
from thrift.protocol import TBinaryProtocol
ms = self.metastore_conn
auth_mechanism = ms.extra_dejson.get('authMechanism', 'NOSASL')
if configuration.conf.get('core', 'security') == 'kerberos':
auth_mechanism = ms.extra_dejson.get('authMechanism', 'GSSAPI')
kerberos_service_name = ms.extra_dejson.get('kerberos_service_name', 'hive')
socket = TSocket.TSocket(ms.host, ms.port)
if configuration.conf.get('core', 'security') == 'kerberos' \
and auth_mechanism == 'GSSAPI':
try:
import saslwrapper as sasl
except ImportError:
import sasl
def sasl_factory():
sasl_client = sasl.Client()
sasl_client.setAttr("host", ms.host)
sasl_client.setAttr("service", kerberos_service_name)
sasl_client.init()
return sasl_client
from thrift_sasl import TSaslClientTransport
transport = TSaslClientTransport(sasl_factory, "GSSAPI", socket)
else:
transport = TTransport.TBufferedTransport(socket)
protocol = TBinaryProtocol.TBinaryProtocol(transport)
return hmsclient.HMSClient(iprot=protocol)
def get_conn(self):
return self.metastore
def check_for_partition(self, schema, table, partition):
"""
Checks whether a partition exists
:param schema: Name of hive schema (database) @table belongs to
:type schema: string
:param table: Name of hive table @partition belongs to
        :type table: string
        :param partition: Expression that matches the partitions to check for
            (eg `a = 'b' AND c = 'd'`)
        :type partition: string
:rtype: boolean
>>> hh = HiveMetastoreHook()
>>> t = 'static_babynames_partitioned'
>>> hh.check_for_partition('airflow', t, "ds='2015-01-01'")
True
"""
with self.metastore as client:
partitions = client.get_partitions_by_filter(
schema, table, partition, 1)
if partitions:
return True
else:
return False
def check_for_named_partition(self, schema, table, partition_name):
"""
Checks whether a partition with a given name exists
:param schema: Name of hive schema (database) @table belongs to
:type schema: string
:param table: Name of hive table @partition belongs to
        :type table: string
        :param partition_name: Name of the partition to check for (eg `a=b/c=d`)
        :type partition_name: string
:rtype: boolean
>>> hh = HiveMetastoreHook()
>>> t = 'static_babynames_partitioned'
>>> hh.check_for_named_partition('airflow', t, "ds=2015-01-01")
True
>>> hh.check_for_named_partition('airflow', t, "ds=xxx")
False
"""
with self.metastore as client:
return client.check_for_named_partition(schema, table, partition_name)
def get_table(self, table_name, db='default'):
"""Get a metastore table object
>>> hh = HiveMetastoreHook()
>>> t = hh.get_table(db='airflow', table_name='static_babynames')
>>> t.tableName
'static_babynames'
>>> [col.name for col in t.sd.cols]
['state', 'year', 'name', 'gender', 'num']
"""
if db == 'default' and '.' in table_name:
db, table_name = table_name.split('.')[:2]
with self.metastore as client:
return client.get_table(dbname=db, tbl_name=table_name)
def get_tables(self, db, pattern='*'):
"""
Get a metastore table object
"""
with self.metastore as client:
tables = client.get_tables(db_name=db, pattern=pattern)
return client.get_table_objects_by_name(db, tables)
def get_databases(self, pattern='*'):
"""
Get a metastore table object
"""
with self.metastore as client:
return client.get_databases(pattern)
def get_partitions(
self, schema, table_name, filter=None):
"""
Returns a list of all partitions in a table. Works only
for tables with less than 32767 (java short max val).
For subpartitioned table, the number might easily exceed this.
>>> hh = HiveMetastoreHook()
>>> t = 'static_babynames_partitioned'
>>> parts = hh.get_partitions(schema='airflow', table_name=t)
>>> len(parts)
1
>>> parts
[{'ds': '2015-01-01'}]
"""
with self.metastore as client:
table = client.get_table(dbname=schema, tbl_name=table_name)
if len(table.partitionKeys) == 0:
raise AirflowException("The table isn't partitioned")
else:
if filter:
parts = client.get_partitions_by_filter(
db_name=schema, tbl_name=table_name,
filter=filter, max_parts=HiveMetastoreHook.MAX_PART_COUNT)
else:
parts = client.get_partitions(
db_name=schema, tbl_name=table_name,
max_parts=HiveMetastoreHook.MAX_PART_COUNT)
pnames = [p.name for p in table.partitionKeys]
return [dict(zip(pnames, p.values)) for p in parts]
@staticmethod
def _get_max_partition_from_part_specs(part_specs, partition_key, filter_map):
"""
Helper method to get max partition of partitions with partition_key
from part specs. key:value pair in filter_map will be used to
filter out partitions.
:param part_specs: list of partition specs.
:type part_specs: list
:param partition_key: partition key name.
:type partition_key: string
:param filter_map: partition_key:partition_value map used for partition filtering,
e.g. {'key1': 'value1', 'key2': 'value2'}.
Only partitions matching all partition_key:partition_value
pairs will be considered as candidates of max partition.
:type filter_map: map
:return: Max partition or None if part_specs is empty.
"""
if not part_specs:
return None
# Assuming all specs have the same keys.
if partition_key not in part_specs[0].keys():
raise AirflowException("Provided partition_key {} "
"is not in part_specs.".format(partition_key))
if filter_map:
is_subset = set(filter_map.keys()).issubset(set(part_specs[0].keys()))
if filter_map and not is_subset:
raise AirflowException("Keys in provided filter_map {} "
"are not subset of part_spec keys: {}"
.format(', '.join(filter_map.keys()),
', '.join(part_specs[0].keys())))
candidates = [p_dict[partition_key] for p_dict in part_specs
if filter_map is None or
all(item in p_dict.items() for item in filter_map.items())]
if not candidates:
return None
else:
return max(candidates).encode('utf-8')
def max_partition(self, schema, table_name, field=None, filter_map=None):
"""
Returns the maximum value for all partitions with given field in a table.
If only one partition key exist in the table, the key will be used as field.
filter_map should be a partition_key:partition_value map and will be used to
filter out partitions.
:param schema: schema name.
:type schema: string
:param table_name: table name.
:type table_name: string
:param field: partition key to get max partition from.
:type field: string
:param filter_map: partition_key:partition_value map used for partition filtering.
:type filter_map: map
>>> hh = HiveMetastoreHook()
>>> filter_map = {'ds': '2015-01-01', 'ds': '2014-01-01'}
>>> t = 'static_babynames_partitioned'
>>> hh.max_partition(schema='airflow',\
... table_name=t, field='ds', filter_map=filter_map)
'2015-01-01'
"""
with self.metastore as client:
table = client.get_table(dbname=schema, tbl_name=table_name)
key_name_set = set(key.name for key in table.partitionKeys)
if len(table.partitionKeys) == 1:
field = table.partitionKeys[0].name
elif not field:
raise AirflowException("Please specify the field you want the max "
"value for.")
elif field not in key_name_set:
raise AirflowException("Provided field is not a partition key.")
if filter_map and not set(filter_map.keys()).issubset(key_name_set):
raise AirflowException("Provided filter_map contains keys "
"that are not partition key.")
part_names = \
client.get_partition_names(schema,
table_name,
max_parts=HiveMetastoreHook.MAX_PART_COUNT)
part_specs = [client.partition_name_to_spec(part_name)
for part_name in part_names]
return HiveMetastoreHook._get_max_partition_from_part_specs(part_specs,
field,
filter_map)
def table_exists(self, table_name, db='default'):
"""
Check if table exists
>>> hh = HiveMetastoreHook()
>>> hh.table_exists(db='airflow', table_name='static_babynames')
True
>>> hh.table_exists(db='airflow', table_name='does_not_exist')
False
"""
try:
self.get_table(table_name, db)
return True
except Exception:
return False
class HiveServer2Hook(BaseHook):
"""
Wrapper around the pyhive library
    Note that the default authMechanism is NONE; to override it you
    can specify it in the ``extra`` of your connection in the UI,
    e.g. ``{"authMechanism": "KERBEROS"}``
"""
def __init__(self, hiveserver2_conn_id='hiveserver2_default'):
self.hiveserver2_conn_id = hiveserver2_conn_id
def get_conn(self, schema=None):
db = self.get_connection(self.hiveserver2_conn_id)
auth_mechanism = db.extra_dejson.get('authMechanism', 'NONE')
if auth_mechanism == 'NONE' and db.login is None:
# we need to give a username
username = 'airflow'
kerberos_service_name = None
if configuration.conf.get('core', 'security') == 'kerberos':
auth_mechanism = db.extra_dejson.get('authMechanism', 'KERBEROS')
kerberos_service_name = db.extra_dejson.get('kerberos_service_name', 'hive')
# pyhive uses GSSAPI instead of KERBEROS as a auth_mechanism identifier
if auth_mechanism == 'GSSAPI':
self.log.warning(
"Detected deprecated 'GSSAPI' for authMechanism "
"for %s. Please use 'KERBEROS' instead",
self.hiveserver2_conn_id
)
auth_mechanism = 'KERBEROS'
from pyhive.hive import connect
return connect(
host=db.host,
port=db.port,
auth=auth_mechanism,
kerberos_service_name=kerberos_service_name,
username=db.login or username,
database=schema or db.schema or 'default')
def _get_results(self, hql, schema='default', fetch_size=None, hive_conf=None):
from pyhive.exc import ProgrammingError
if isinstance(hql, basestring):
hql = [hql]
previous_description = None
with contextlib.closing(self.get_conn(schema)) as conn, \
contextlib.closing(conn.cursor()) as cur:
cur.arraysize = fetch_size or 1000
env_context = get_context_from_env_var()
if hive_conf:
env_context.update(hive_conf)
for k, v in env_context.items():
cur.execute("set {}={}".format(k, v))
for statement in hql:
cur.execute(statement)
# we only get results of statements that returns
lowered_statement = statement.lower().strip()
if (lowered_statement.startswith('select') or
lowered_statement.startswith('with') or
(lowered_statement.startswith('set') and
'=' not in lowered_statement)):
description = [c for c in cur.description]
if previous_description and previous_description != description:
message = '''The statements are producing different descriptions:
Current: {}
Previous: {}'''.format(repr(description),
repr(previous_description))
raise ValueError(message)
elif not previous_description:
previous_description = description
yield description
try:
# DB API 2 raises when no results are returned
# we're silencing here as some statements in the list
# may be `SET` or DDL
for row in cur:
yield row
except ProgrammingError:
self.log.debug("get_results returned no records")
def get_results(self, hql, schema='default', fetch_size=None, hive_conf=None):
"""
Get results of the provided hql in target schema.
:param hql: hql to be executed.
:param schema: target schema, default to 'default'.
        :param fetch_size: max size of result to fetch.
        :param hive_conf: hive_conf to execute along with the hql.
:return: results of hql execution.
"""
results_iter = self._get_results(hql, schema,
fetch_size=fetch_size, hive_conf=hive_conf)
header = next(results_iter)
results = {
'data': list(results_iter),
'header': header
}
return results
def to_csv(
self,
hql,
csv_filepath,
schema='default',
delimiter=',',
lineterminator='\r\n',
output_header=True,
fetch_size=1000,
hive_conf=None):
"""
Execute hql in target schema and write results to a csv file.
:param hql: hql to be executed.
:param csv_filepath: filepath of csv to write results into.
        :param schema: target schema, default to 'default'.
:param delimiter: delimiter of the csv file.
:param lineterminator: lineterminator of the csv file.
:param output_header: header of the csv file.
:param fetch_size: number of result rows to write into the csv file.
        :param hive_conf: hive_conf to execute along with the hql.
:return:
"""
results_iter = self._get_results(hql, schema,
fetch_size=fetch_size, hive_conf=hive_conf)
header = next(results_iter)
message = None
with open(csv_filepath, 'wb') as f:
writer = csv.writer(f,
delimiter=delimiter,
lineterminator=lineterminator,
encoding='utf-8')
try:
if output_header:
self.log.debug('Cursor description is %s', header)
writer.writerow([c[0] for c in header])
for i, row in enumerate(results_iter):
writer.writerow(row)
if i % fetch_size == 0:
self.log.info("Written %s rows so far.", i)
except ValueError as exception:
message = str(exception)
if message:
# need to clean up the file first
os.remove(csv_filepath)
raise ValueError(message)
self.log.info("Done. Loaded a total of %s rows.", i)
def get_records(self, hql, schema='default'):
"""
Get a set of records from a Hive query.
>>> hh = HiveServer2Hook()
>>> sql = "SELECT * FROM airflow.static_babynames LIMIT 100"
>>> len(hh.get_records(sql))
100
"""
return self.get_results(hql, schema=schema)['data']
def get_pandas_df(self, hql, schema='default'):
"""
Get a pandas dataframe from a Hive query
>>> hh = HiveServer2Hook()
>>> sql = "SELECT * FROM airflow.static_babynames LIMIT 100"
>>> df = hh.get_pandas_df(sql)
>>> len(df.index)
100
"""
import pandas as pd
res = self.get_results(hql, schema=schema)
df = pd.DataFrame(res['data'])
df.columns = [c[0] for c in res['header']]
return df
| apache-2.0 | -6,283,937,170,555,760,000 | 38.47306 | 90 | 0.540198 | false |
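A brief usage sketch for the two hooks above (assumes an Airflow deployment where the referenced connection ids exist and point at a reachable Hive service; the table name is illustrative):

from airflow.hooks.hive_hooks import HiveCliHook, HiveServer2Hook

# Run a statement through the hive/beeline CLI wrapper
cli_hook = HiveCliHook(hive_cli_conn_id="hive_cli_default")
cli_hook.run_cli("SHOW TABLES;", schema="default")

# Fetch rows through HiveServer2 (pyhive) instead
hs2_hook = HiveServer2Hook(hiveserver2_conn_id="hiveserver2_default")
rows = hs2_hook.get_records("SELECT * FROM some_table LIMIT 10", schema="default")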
jphnoel/udata | udata/core/followers/models.py | 2 | 1187 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from datetime import datetime
from udata.models import db
from .signals import on_follow, on_unfollow
__all__ = ('Follow', )
class FollowQuerySet(db.BaseQuerySet):
def following(self, user):
return self(follower=user, until=None)
def followers(self, user):
return self(following=user, until=None)
def is_following(self, user, following):
return self(follower=user, following=following, until=None).count() > 0
class Follow(db.Document):
follower = db.ReferenceField('User', required=True)
following = db.GenericReferenceField()
since = db.DateTimeField(required=True, default=datetime.now)
until = db.DateTimeField()
meta = {
'indexes': [
'follower',
'following',
('follower', 'until'),
('following', 'until'),
],
'queryset_class': FollowQuerySet,
}
@db.post_save.connect
def emit_new_follower(sender, document, **kwargs):
if isinstance(document, Follow):
if document.until:
on_unfollow.send(document)
else:
on_follow.send(document)
| agpl-3.0 | 5,721,291,466,655,686,000 | 23.729167 | 79 | 0.625948 | false |
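A small sketch of how the model and its custom queryset might be used (the user and dataset objects are assumed to already exist in a udata application context):

from datetime import datetime
from udata.core.followers.models import Follow

follow = Follow.objects.create(follower=user, following=dataset)  # post_save emits on_follow
Follow.objects.is_following(user, dataset)  # True while `until` is unset
Follow.objects.following(user)              # everything the user still follows

follow.until = datetime.now()  # soft-delete the relationship
follow.save()                  # post_save now emits on_unfollow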
indautgrp/erpnext | erpnext/crm/doctype/opportunity/opportunity.py | 2 | 8376 | # Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import frappe, json
from frappe.utils import cstr, cint, get_fullname
from frappe import msgprint, _
from frappe.model.mapper import get_mapped_doc
from erpnext.setup.utils import get_exchange_rate
from erpnext.utilities.transaction_base import TransactionBase
from erpnext.accounts.party import get_party_account_currency
subject_field = "title"
sender_field = "contact_email"
class Opportunity(TransactionBase):
def after_insert(self):
if self.lead:
frappe.get_doc("Lead", self.lead).set_status(update=True)
def validate(self):
self._prev = frappe._dict({
"contact_date": frappe.db.get_value("Opportunity", self.name, "contact_date") if \
(not cint(self.get("__islocal"))) else None,
"contact_by": frappe.db.get_value("Opportunity", self.name, "contact_by") if \
(not cint(self.get("__islocal"))) else None,
})
self.make_new_lead_if_required()
if not self.enquiry_from:
frappe.throw(_("Opportunity From field is mandatory"))
self.set_status()
self.validate_item_details()
self.validate_uom_is_integer("uom", "qty")
self.validate_lead_cust()
self.validate_cust_name()
if not self.title:
self.title = self.customer_name
if not self.with_items:
self.items = []
def make_new_lead_if_required(self):
"""Set lead against new opportunity"""
if not (self.lead or self.customer) and self.contact_email:
lead_name = frappe.db.get_value("Lead", {"email_id": self.contact_email})
if not lead_name:
sender_name = get_fullname(self.contact_email)
if sender_name == self.contact_email:
sender_name = None
if not sender_name and ('@' in self.contact_email):
email_name = self.contact_email.split('@')[0]
email_split = email_name.split('.')
sender_name = ''
for s in email_split:
sender_name += s.capitalize() + ' '
lead = frappe.get_doc({
"doctype": "Lead",
"email_id": self.contact_email,
"lead_name": sender_name
})
lead.flags.ignore_email_validation = True
lead.insert(ignore_permissions=True)
lead_name = lead.name
self.enquiry_from = "Lead"
self.lead = lead_name
def declare_enquiry_lost(self,arg):
if not self.has_quotation():
frappe.db.set(self, 'status', 'Lost')
frappe.db.set(self, 'order_lost_reason', arg)
else:
frappe.throw(_("Cannot declare as lost, because Quotation has been made."))
def on_trash(self):
self.delete_events()
def has_quotation(self):
return frappe.db.get_value("Quotation Item", {"prevdoc_docname": self.name, "docstatus": 1})
def has_ordered_quotation(self):
return frappe.db.sql("""select q.name from `tabQuotation` q, `tabQuotation Item` qi
where q.name = qi.parent and q.docstatus=1 and qi.prevdoc_docname =%s and q.status = 'Ordered'""", self.name)
def validate_cust_name(self):
if self.customer:
self.customer_name = frappe.db.get_value("Customer", self.customer, "customer_name")
elif self.lead:
lead_name, company_name = frappe.db.get_value("Lead", self.lead, ["lead_name", "company_name"])
self.customer_name = company_name or lead_name
def get_cust_address(self,name):
details = frappe.db.sql("""select customer_name, address, territory, customer_group
from `tabCustomer` where name = %s and docstatus != 2""", (name), as_dict = 1)
if details:
ret = {
'customer_name': details and details[0]['customer_name'] or '',
'address' : details and details[0]['address'] or '',
'territory' : details and details[0]['territory'] or '',
'customer_group' : details and details[0]['customer_group'] or ''
}
			# Get primary contact details separately: without a primary contact, a join query would fail to fetch the customer details
contact_det = frappe.db.sql("""select contact_name, contact_no, email_id
from `tabContact` where customer = %s and is_customer = 1
and is_primary_contact = 'Yes' and docstatus != 2""", name, as_dict = 1)
ret['contact_person'] = contact_det and contact_det[0]['contact_name'] or ''
ret['contact_no'] = contact_det and contact_det[0]['contact_no'] or ''
ret['email_id'] = contact_det and contact_det[0]['email_id'] or ''
return ret
else:
frappe.throw(_("Customer {0} does not exist").format(name), frappe.DoesNotExistError)
def on_update(self):
self.add_calendar_event()
def add_calendar_event(self, opts=None, force=False):
if not opts:
opts = frappe._dict()
opts.description = ""
opts.contact_date = self.contact_date
if self.customer:
if self.contact_person:
opts.description = 'Contact '+cstr(self.contact_person)
else:
opts.description = 'Contact customer '+cstr(self.customer)
elif self.lead:
if self.contact_display:
opts.description = 'Contact '+cstr(self.contact_display)
else:
opts.description = 'Contact lead '+cstr(self.lead)
opts.subject = opts.description
opts.description += '. By : ' + cstr(self.contact_by)
if self.to_discuss:
opts.description += ' To Discuss : ' + cstr(self.to_discuss)
super(Opportunity, self).add_calendar_event(opts, force)
def validate_item_details(self):
if not self.get('items'):
return
# set missing values
item_fields = ("item_name", "description", "item_group", "brand")
for d in self.items:
if not d.item_code:
continue
item = frappe.db.get_value("Item", d.item_code, item_fields, as_dict=True)
for key in item_fields:
if not d.get(key): d.set(key, item.get(key))
def validate_lead_cust(self):
if self.enquiry_from == 'Lead':
if not self.lead:
frappe.throw(_("Lead must be set if Opportunity is made from Lead"))
else:
self.customer = None
elif self.enquiry_from == 'Customer':
if not self.customer:
msgprint("Customer is mandatory if 'Opportunity From' is selected as Customer", raise_exception=1)
else:
self.lead = None
@frappe.whitelist()
def get_item_details(item_code):
item = frappe.db.sql("""select item_name, stock_uom, image, description, item_group, brand
from `tabItem` where name = %s""", item_code, as_dict=1)
return {
'item_name': item and item[0]['item_name'] or '',
'uom': item and item[0]['stock_uom'] or '',
'description': item and item[0]['description'] or '',
'image': item and item[0]['image'] or '',
'item_group': item and item[0]['item_group'] or '',
'brand': item and item[0]['brand'] or ''
}
@frappe.whitelist()
def make_quotation(source_name, target_doc=None):
def set_missing_values(source, target):
quotation = frappe.get_doc(target)
company_currency = frappe.db.get_value("Company", quotation.company, "default_currency")
party_account_currency = get_party_account_currency("Customer", quotation.customer,
quotation.company) if quotation.customer else company_currency
quotation.currency = party_account_currency or company_currency
if company_currency == quotation.currency:
exchange_rate = 1
else:
exchange_rate = get_exchange_rate(quotation.currency, company_currency)
quotation.conversion_rate = exchange_rate
quotation.run_method("set_missing_values")
quotation.run_method("calculate_taxes_and_totals")
doclist = get_mapped_doc("Opportunity", source_name, {
"Opportunity": {
"doctype": "Quotation",
"field_map": {
"enquiry_from": "quotation_to",
"enquiry_type": "order_type",
"name": "enq_no",
}
},
"Opportunity Item": {
"doctype": "Quotation Item",
"field_map": {
"parent": "prevdoc_docname",
"parenttype": "prevdoc_doctype",
"uom": "stock_uom"
},
"add_if_empty": True
}
}, target_doc, set_missing_values)
return doclist
@frappe.whitelist()
def make_supplier_quotation(source_name, target_doc=None):
doclist = get_mapped_doc("Opportunity", source_name, {
"Opportunity": {
"doctype": "Supplier Quotation",
"field_map": {
"name": "opportunity"
}
},
"Opportunity Item": {
"doctype": "Supplier Quotation Item",
"field_map": {
"uom": "stock_uom"
}
}
}, target_doc)
return doclist
@frappe.whitelist()
def set_multiple_status(names, status):
names = json.loads(names)
for name in names:
opp = frappe.get_doc("Opportunity", name)
opp.status = status
opp.save()
| gpl-3.0 | 170,767,392,767,994,750 | 30.969466 | 185 | 0.675382 | false |
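A hedged sketch of calling the whitelisted mapper above from server-side code (the opportunity name is a placeholder and a real call needs an initialised Frappe site context):

from erpnext.crm.doctype.opportunity.opportunity import make_quotation

quotation = make_quotation("OPTY-00001")  # placeholder Opportunity name
quotation.insert()  # save the mapped document as a draft Quotation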
CloudWareChile/OpenChile | openerp/test/test_osv.py | 14 | 4143 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2010 OpenERP S.A. http://www.openerp.com
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import unittest
from openerp.osv.query import Query
class QueryTestCase(unittest.TestCase):
def test_basic_query(self):
query = Query()
query.tables.extend(['"product_product"','"product_template"'])
query.where_clause.append("product_product.template_id = product_template.id")
query.join(("product_template", "product_category", "categ_id", "id"), outer=False) # add normal join
query.join(("product_product", "res_user", "user_id", "id"), outer=True) # outer join
self.assertEquals(query.get_sql()[0].strip(),
""""product_product" LEFT JOIN "res_user" ON ("product_product"."user_id" = "res_user"."id"),"product_template" JOIN "product_category" ON ("product_template"."categ_id" = "product_category"."id") """.strip())
self.assertEquals(query.get_sql()[1].strip(), """product_product.template_id = product_template.id""".strip())
def test_query_chained_explicit_joins(self):
query = Query()
query.tables.extend(['"product_product"','"product_template"'])
query.where_clause.append("product_product.template_id = product_template.id")
query.join(("product_template", "product_category", "categ_id", "id"), outer=False) # add normal join
query.join(("product_category", "res_user", "user_id", "id"), outer=True) # CHAINED outer join
self.assertEquals(query.get_sql()[0].strip(),
""""product_product","product_template" JOIN "product_category" ON ("product_template"."categ_id" = "product_category"."id") LEFT JOIN "res_user" ON ("product_category"."user_id" = "res_user"."id")""".strip())
self.assertEquals(query.get_sql()[1].strip(), """product_product.template_id = product_template.id""".strip())
def test_mixed_query_chained_explicit_implicit_joins(self):
query = Query()
query.tables.extend(['"product_product"','"product_template"'])
query.where_clause.append("product_product.template_id = product_template.id")
query.join(("product_template", "product_category", "categ_id", "id"), outer=False) # add normal join
query.join(("product_category", "res_user", "user_id", "id"), outer=True) # CHAINED outer join
query.tables.append('"account.account"')
query.where_clause.append("product_category.expense_account_id = account_account.id") # additional implicit join
self.assertEquals(query.get_sql()[0].strip(),
""""product_product","product_template" JOIN "product_category" ON ("product_template"."categ_id" = "product_category"."id") LEFT JOIN "res_user" ON ("product_category"."user_id" = "res_user"."id"),"account.account" """.strip())
self.assertEquals(query.get_sql()[1].strip(), """product_product.template_id = product_template.id AND product_category.expense_account_id = account_account.id""".strip())
def test_raise_missing_lhs(self):
query = Query()
query.tables.append('"product_product"')
self.assertRaises(AssertionError, query.join, ("product_template", "product_category", "categ_id", "id"), outer=False)
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 | 5,466,175,959,335,884,000 | 61.772727 | 240 | 0.646874 | false |
graingert/pip | docs/conf.py | 2 | 7030 | # -*- coding: utf-8 -*-
#
# pip documentation build configuration file, created by
# sphinx-quickstart on Tue Apr 22 22:08:49 2008
#
# This file is execfile()d with the current directory set to its containing dir
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import os
import sys
on_rtd = os.environ.get('READTHEDOCS', None) == 'True'
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath(os.pardir))
# sys.path.append(os.path.join(os.path.dirname(__file__), '../'))
# -- General configuration ----------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
# extensions = ['sphinx.ext.autodoc']
extensions = ['sphinx.ext.extlinks', 'docs.pipext']
# Add any paths that contain templates here, relative to this directory.
templates_path = []
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
# source_encoding = 'utf-8'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'pip'
copyright = '2008-2014, PyPA'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
try:
from pip import __version__
# The short X.Y version.
version = '.'.join(__version__.split('.')[:2])
# The full version, including alpha/beta/rc tags.
release = __version__
except ImportError:
version = release = 'dev'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
# language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
# today = ''
# Else, today_fmt is used as the format for a strftime call.
today_fmt = '%B %d, %Y'
# List of documents that shouldn't be included in the build.
# unused_docs = []
# List of directories, relative to source directory, that shouldn't be searched
# for source files.
exclude_trees = ['build']
# The reST default role (used for this markup: `text`) to use for all documents
# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
extlinks = {
'issue': ('https://github.com/pypa/pip/issues/%s', '#'),
'pull': ('https://github.com/pypa/pip/pull/%s', 'PR #'),
}
# -- Options for HTML output --------------------------------------------------
# The theme to use for HTML and HTML Help pages. Major themes that come with
# Sphinx are currently 'default' and 'sphinxdoc'.
html_theme = 'default'
if not on_rtd:
try:
import sphinx_rtd_theme
html_theme = 'sphinx_rtd_theme'
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
except ImportError:
pass
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
# html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
# html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
# html_logo = '_static/piplogo.png'
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
# html_favicon = 'favicon.png'
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
html_use_smartypants = False
# Custom sidebar templates, maps document names to template names.
# html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
# html_additional_pages = {}
# If false, no module index is generated.
html_use_modindex = False
# If false, no index is generated.
html_use_index = False
# If true, the index is split into individual pages for each letter.
# html_split_index = False
# If true, links to the reST sources are added to the pages.
html_show_sourcelink = False
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
# html_use_opensearch = ''
# If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = ''
# Output file base name for HTML help builder.
htmlhelp_basename = 'pipdocs'
# -- Options for LaTeX output -------------------------------------------------
# The paper size ('letter' or 'a4').
# latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
# latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual])
latex_documents = [
(
'index',
'pip.tex',
u'pip Documentation',
u'The pip developers',
'manual',
),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
# latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
# latex_use_parts = False
# Additional stuff for the LaTeX preamble.
# latex_preamble = ''
# Documents to append as an appendix to all manuals.
# latex_appendices = []
# If false, no module index is generated.
# latex_use_modindex = True
| mit | 7,698,080,838,145,617,000 | 31.100457 | 79 | 0.695164 | false |
asimshankar/tensorflow | tensorflow/contrib/distribute/python/strategy_test_lib.py | 1 | 11524 | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Library for testing DistributionStrategy descendants."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.core.protobuf import config_pb2
from tensorflow.python.distribute import distribution_strategy_context as ds_context
from tensorflow.python.distribute import reduce_util
from tensorflow.python.distribute import values
from tensorflow.python.eager import backprop
from tensorflow.python.eager import context
from tensorflow.python.eager import test
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.layers import core
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables
from tensorflow.python.training import optimizer
class _TestException(Exception):
pass
# May be the argument to either distribution.call_for_each_replica() or
# get_replica_context().merge_call()
def _raise_exception_fn(_=None):
raise _TestException()
# Must be the argument to a distribution.call_for_each_replica() call, calls a
# get_replica_context().merge_call() that raises an exception.
def _merge_raises_fn():
ds_context.get_replica_context().merge_call(_raise_exception_fn)
# Must be the argument to a get_replica_context().merge_call() call, calls
# dist.call_for_each_replica() with a function that raises an exception.
def _call_raises_fn(dist):
dist.call_for_each_replica(_raise_exception_fn)
# Must be the argument to a distribution.call_for_each_replica() call,
# calls a get_replica_context().merge_call() that calls a
# call_for_each_replica() that raises an exception.
def _merge_call_raises_fn():
ds_context.get_replica_context().merge_call(_call_raises_fn)
# Must be the argument to a get_replica_context().merge_call() call, calls
# dist.call_for_each_replica() with a function that calls a
# get_replica_context().merge_call() that raises an exception.
def _call_merge_raises_fn(dist):
dist.call_for_each_replica(_merge_raises_fn)
# Must be the argument to a distribution.call_for_each_replica() call, calls a
# get_replica_context().merge_call() that calls a call_for_each_replica() that
# calls a get_replica_context().merge_call() that raises an exception.
def _merge_call_merge_raises_fn():
ds_context.get_replica_context().merge_call(_call_merge_raises_fn)
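# Illustrative call chain exercised by the helpers above (assuming some concrete
# DistributionStrategy `dist`): dist.call_for_each_replica(_merge_call_merge_raises_fn)
# runs per-replica code that merge_call()s back into cross-replica context, fans out
# again via call_for_each_replica(), merge_call()s once more and finally raises, so
# the tests can assert that _TestException propagates through every level.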
class DistributionTestBase(test.TestCase):
"""Some tests that should work with any DistributionStrategy."""
def _test_minimize_loss_eager(self, d):
with d.scope():
l = core.Dense(1, use_bias=False)
def loss(x):
# TODO(josh11b): What if this constant was instead a captured
# value? Would it need to be a value that has been passed
# through d.broadcast()?
y = array_ops.reshape(l(x), []) - constant_op.constant(1.)
return y * y
# TODO(isaprykin): Extract implicit_grad+get_filtered_grad_fn into a
# common `implicit_grad` function and put it in DistributionStrategy.
grad_fn = backprop.implicit_grad(loss)
grad_fn = optimizer.get_filtered_grad_fn(grad_fn)
def update(v, g):
return v.assign_sub(0.2 * g)
one = d.broadcast(constant_op.constant([[1.]]))
def step():
"""Perform one optimization step."""
# Run forward & backward to get gradients, variables list.
g_v = d.call_for_each_replica(grad_fn, args=(one,))
# Update the variables using the gradients and the update() function.
before_list = []
after_list = []
for g, v in g_v:
fetched = d.read_var(v)
before_list.append(fetched)
# control_dependencies irrelevant but harmless in eager execution
with ops.control_dependencies([fetched]):
g = d.extended.reduce_to(
reduce_util.ReduceOp.SUM, g, destinations=v)
with ops.control_dependencies(d.update(
v, update, g, grouped=False)):
after_list.append(d.read_var(v))
return before_list, after_list
for i in range(10):
b, a = step()
if i == 0:
before, = b # pylint: disable=unbalanced-tuple-unpacking
after, = a # pylint: disable=unbalanced-tuple-unpacking
error_before = abs(before.numpy() - 1)
error_after = abs(after.numpy() - 1)
# Error should go down
self.assertLess(error_after, error_before)
def _test_minimize_loss_graph(self, d, soft_placement=False,
learning_rate=0.2):
config = config_pb2.ConfigProto()
config.allow_soft_placement = soft_placement
config.gpu_options.per_process_gpu_memory_fraction = 0.3
with context.graph_mode(), \
ops.Graph().as_default(), \
self.cached_session(config=config) as sess, \
d.scope():
l = core.Dense(1, use_bias=False)
def loss(x):
# TODO(josh11b): What if this constant was instead a captured
# value? Would it need to be a value that has been passed
# through d.broadcast()?
y = array_ops.reshape(l(x), []) - constant_op.constant(1.)
return y * y
grad_fn = backprop.implicit_grad(loss)
def update(v, g):
return v.assign_sub(learning_rate * g)
one = d.broadcast(constant_op.constant([[1.]]))
def step():
"""Perform one optimization step."""
# Run forward & backward to get gradients, variables list.
g_v = d.call_for_each_replica(grad_fn, args=(one,))
# Update the variables using the gradients and the update() function.
before_list = []
after_list = []
for g, v in g_v:
fetched = d.read_var(v)
before_list.append(fetched)
with ops.control_dependencies([fetched]):
g = d.extended.reduce_to(
reduce_util.ReduceOp.SUM, g, destinations=v)
with ops.control_dependencies(d.update(
v, update, g, grouped=False)):
after_list.append(d.read_var(v))
return before_list, after_list
before_out, after_out = step()
variables.global_variables_initializer().run()
for i in range(10):
b, a = sess.run((before_out, after_out))
if i == 0:
before, = b
after, = a
error_before = abs(before - 1)
error_after = abs(after - 1)
# Error should go down
self.assertLess(error_after, error_before)
def _test_replica_id(self, d):
with d.scope():
expected_devices = [False] * len(d.extended.worker_devices)
def mark_devices_fn():
replica_id = self.evaluate(
ds_context.get_replica_context().replica_id_in_sync_group)
self.assertLess(replica_id, len(d.extended.worker_devices))
self.assertFalse(expected_devices[replica_id])
expected_devices[replica_id] = True
d.call_for_each_replica(mark_devices_fn)
self.assertAllEqual(expected_devices,
[True] * len(d.extended.worker_devices))
def _test_call_and_merge_exceptions(self, dist):
with dist.scope():
with self.assertRaises(_TestException):
dist.call_for_each_replica(_raise_exception_fn)
with self.assertRaises(_TestException):
dist.call_for_each_replica(_merge_raises_fn)
with self.assertRaises(_TestException):
dist.call_for_each_replica(_merge_call_raises_fn)
with self.assertRaises(_TestException):
dist.call_for_each_replica(_merge_call_merge_raises_fn)
def _input_fn_to_test_input_context(self,
dataset_fn,
expected_num_replicas_in_sync,
expected_num_input_pipelines,
expected_input_pipeline_id):
# Use a list of one element as counter so that it can be captured by the
# `_input_fn`. This counter is incremented by 1 each time an input_fn is
# called. We use this counter to check whether the `input_pipeline_id`
# matches the counter in the in-graph replication.
worker_id_counter = [0]
def _input_fn(input_context):
"""Input fn for testing."""
self.assertIsNotNone(input_context)
self.assertEqual(expected_num_replicas_in_sync,
input_context.num_replicas_in_sync)
self.assertEqual(expected_num_input_pipelines,
input_context.num_input_pipelines)
if expected_input_pipeline_id is not None:
self.assertEqual(expected_input_pipeline_id,
input_context.input_pipeline_id)
else:
self.assertEqual(worker_id_counter[0], input_context.input_pipeline_id)
worker_id_counter[0] += 1
return dataset_fn()
return _input_fn
def _test_input_fn_iterator(self, iterator, devices, expected_values,
sess=None):
evaluate = lambda x: sess.run(x) if sess else self.evaluate(x)
evaluate(iterator.initialize())
for expected_value in expected_values:
next_element = iterator.get_next()
computed_value = evaluate(
[values.select_device(d, next_element) for d in devices])
self.assertEqual(expected_value, computed_value)
with self.assertRaises(errors.OutOfRangeError):
next_element = iterator.get_next()
evaluate([values.select_device(d, next_element) for d in devices])
# After re-initializing the iterator, should be able to iterate again.
evaluate(iterator.initialize())
for expected_value in expected_values:
next_element = iterator.get_next()
computed_value = evaluate(
[values.select_device(d, next_element) for d in devices])
self.assertEqual(expected_value, computed_value)
def _test_global_step_update(self, strategy):
with strategy.scope():
global_step = variable_scope.get_variable(
"global_step",
shape=[],
dtype=dtypes.int64,
initializer=init_ops.zeros_initializer(),
trainable=False,
aggregation=variables.VariableAggregation.ONLY_FIRST_REPLICA)
self.evaluate(variables.global_variables_initializer())
def model_fn():
train_op = global_step.assign_add(1)
value = global_step.read_value()
return train_op, value
train_ops, value = strategy.call_for_each_replica(model_fn)
self.evaluate(strategy.group(train_ops))
global_step_tensors = strategy.unwrap(value)
global_step_values = self.evaluate(global_step_tensors)
self.assertEqual((1,) * len(global_step_tensors), global_step_values)
| apache-2.0 | -6,439,386,105,731,751,000 | 38.331058 | 84 | 0.653766 | false |
maziarraissi/ParametricGP-in-Python | PGPs_autograd/PGP/Utilities.py | 1 | 1168 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
@author: Maziar Raissi
"""
import autograd.numpy as np
def kernel(X, Xp, hyp):
output_scale = np.exp(hyp[0])
lengthscales = np.sqrt(np.exp(hyp[1:]))
X = X/lengthscales
Xp = Xp/lengthscales
    X_SumSquare = np.sum(np.square(X), axis=1)
    Xp_SumSquare = np.sum(np.square(Xp), axis=1)
    mul = np.dot(X, Xp.T)
dists = X_SumSquare[:,np.newaxis]+Xp_SumSquare-2.0*mul
return output_scale * np.exp(-0.5 * dists)
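# Illustrative shapes (not part of the original code): with X of shape (N, D),
# Xp of shape (M, D) and hyp = np.log(np.concatenate([[sigma2], lengthscales**2])),
# kernel(X, Xp, hyp) returns the (N, M) squared-exponential covariance matrix
# sigma2 * exp(-0.5 * ||(x - x') / lengthscales||^2).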
def stochastic_update_Adam(w,grad_w,mt,vt,lrate,iteration):
    beta1 = 0.9
    beta2 = 0.999
    epsilon = 1e-8
    mt = mt*beta1 + (1.0-beta1)*grad_w
    vt = vt*beta2 + (1.0-beta2)*grad_w**2
    mt_hat = mt/(1.0-beta1**iteration)
    vt_hat = vt/(1.0-beta2**iteration)
    scal = 1.0/(np.sqrt(vt_hat) + epsilon)
    w = w - lrate*mt_hat*scal
return w,mt,vt
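# Typical usage (illustrative): initialise mt = vt = np.zeros_like(w), then call
# w, mt, vt = stochastic_update_Adam(w, grad_w, mt, vt, 1e-3, it) once per
# iteration with a 1-based `it`, so the bias-correction denominators never vanish.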
def Normalize(X, X_m, X_s):
return (X-X_m)/(X_s)
def Denormalize(X, X_m, X_s):
return X_s*X + X_m
def fetch_minibatch(X,y,N_batch):
N = X.shape[0]
idx = np.random.permutation(N)
X_batch = X[idx[0:N_batch],:]
y_batch = y[idx[0:N_batch]]
return X_batch, y_batch | mit | -4,844,009,523,158,646,000 | 23.354167 | 59 | 0.581336 | false |
andrewyoung1991/abjad | abjad/tools/documentationtools/ReSTTOCItem.py | 1 | 1154 | # -*- encoding: utf-8 -*-
from abjad.tools import stringtools
from abjad.tools.datastructuretools.TreeNode import TreeNode
class ReSTTOCItem(TreeNode):
r'''A ReST TOC item.
::
>>> item = documentationtools.ReSTTOCItem(text='api/index')
>>> item
ReSTTOCItem(
text='api/index'
)
::
>>> print(item.rest_format)
api/index
'''
### INITIALIZER ###
def __init__(self, name=None, text='foo'):
TreeNode.__init__(self, name)
self.text = text
### PRIVATE PROPERTIES ###
@property
def _rest_format_contributions(self):
return [self.text]
### PUBLIC PROPERTIES ###
@property
def rest_format(self):
r'''ReST format of ReST TOC item.
Returns string.
'''
return '\n'.join(self._rest_format_contributions)
@property
def text(self):
r'''Gets and sets text of ReST TOC item.
Returns string.
'''
return self._text
@text.setter
def text(self, arg):
assert stringtools.is_string(arg)
arg = arg.strip()
self._text = arg
| gpl-3.0 | 5,943,626,681,470,671,000 | 18.896552 | 67 | 0.551127 | false |
tinkerthaler/odoo | addons/account/account.py | 19 | 190583 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import logging
from datetime import datetime
from dateutil.relativedelta import relativedelta
from operator import itemgetter
import time
import openerp
from openerp import SUPERUSER_ID, api
from openerp import tools
from openerp.osv import fields, osv, expression
from openerp.tools.translate import _
from openerp.tools.float_utils import float_round as round
import openerp.addons.decimal_precision as dp
_logger = logging.getLogger(__name__)
def check_cycle(self, cr, uid, ids, context=None):
""" climbs the ``self._table.parent_id`` chains for 100 levels or
until it can't find any more parent(s)
Returns true if it runs out of parents (no cycle), false if
it can recurse 100 times without ending all chains
"""
level = 100
while len(ids):
cr.execute('SELECT DISTINCT parent_id '\
'FROM '+self._table+' '\
'WHERE id IN %s '\
'AND parent_id IS NOT NULL',(tuple(ids),))
ids = map(itemgetter(0), cr.fetchall())
if not level:
return False
level -= 1
return True
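# Illustrative use (hypothetical id): a parent_id constraint can call
# check_cycle(self, cr, uid, [account_id]) and treat a False result (100 levels
# climbed without exhausting the parent chains) as a recursion error.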
class res_company(osv.osv):
_inherit = "res.company"
_columns = {
'income_currency_exchange_account_id': fields.many2one(
'account.account',
string="Gain Exchange Rate Account",
domain="[('type', '=', 'other')]",),
'expense_currency_exchange_account_id': fields.many2one(
'account.account',
string="Loss Exchange Rate Account",
domain="[('type', '=', 'other')]",),
}
class account_payment_term(osv.osv):
_name = "account.payment.term"
_description = "Payment Term"
_columns = {
'name': fields.char('Payment Term', translate=True, required=True),
'active': fields.boolean('Active', help="If the active field is set to False, it will allow you to hide the payment term without removing it."),
'note': fields.text('Description', translate=True),
'line_ids': fields.one2many('account.payment.term.line', 'payment_id', 'Terms', copy=True),
}
_defaults = {
'active': 1,
}
_order = "name"
def compute(self, cr, uid, id, value, date_ref=False, context=None):
if not date_ref:
date_ref = datetime.now().strftime('%Y-%m-%d')
pt = self.browse(cr, uid, id, context=context)
amount = value
result = []
obj_precision = self.pool.get('decimal.precision')
prec = obj_precision.precision_get(cr, uid, 'Account')
for line in pt.line_ids:
if line.value == 'fixed':
amt = round(line.value_amount, prec)
elif line.value == 'procent':
amt = round(value * line.value_amount, prec)
elif line.value == 'balance':
amt = round(amount, prec)
if amt:
next_date = (datetime.strptime(date_ref, '%Y-%m-%d') + relativedelta(days=line.days))
if line.days2 < 0:
next_first_date = next_date + relativedelta(day=1,months=1) #Getting 1st of next month
next_date = next_first_date + relativedelta(days=line.days2)
if line.days2 > 0:
next_date += relativedelta(day=line.days2, months=1)
result.append( (next_date.strftime('%Y-%m-%d'), amt) )
amount -= amt
amount = reduce(lambda x,y: x+y[1], result, 0.0)
dist = round(value-amount, prec)
if dist:
result.append( (time.strftime('%Y-%m-%d'), dist) )
return result
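    # Illustrative walk-through (hypothetical term, not part of the original code):
    # a '30% in advance' line ('procent', value_amount=0.3, days=0) followed by a
    # 'balance' line with days=30 and days2=-1, applied to value=1000.0 and
    # date_ref='2014-01-15', yields [('2014-01-15', 300.0), ('2014-02-28', 700.0)]:
    # 30 days after 15/01 falls on 14/02 and days2=-1 moves the due date to the
    # last day of that month.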
class account_payment_term_line(osv.osv):
_name = "account.payment.term.line"
_description = "Payment Term Line"
_columns = {
'value': fields.selection([('procent', 'Percent'),
('balance', 'Balance'),
('fixed', 'Fixed Amount')], 'Computation',
required=True, help="""Select here the kind of valuation related to this payment term line. Note that you should have your last line with the type 'Balance' to ensure that the whole amount will be treated."""),
'value_amount': fields.float('Amount To Pay', digits_compute=dp.get_precision('Payment Term'), help="For percent enter a ratio between 0-1."),
'days': fields.integer('Number of Days', required=True, help="Number of days to add before computation of the day of month." \
"If Date=15/01, Number of Days=22, Day of Month=-1, then the due date is 28/02."),
'days2': fields.integer('Day of the Month', required=True, help="Day of the month, set -1 for the last day of the current month. If it's positive, it gives the day of the next month. Set 0 for net days (otherwise it's based on the beginning of the month)."),
'payment_id': fields.many2one('account.payment.term', 'Payment Term', required=True, select=True, ondelete='cascade'),
}
_defaults = {
'value': 'balance',
'days': 30,
'days2': 0,
}
_order = "value desc,days"
def _check_percent(self, cr, uid, ids, context=None):
obj = self.browse(cr, uid, ids[0], context=context)
if obj.value == 'procent' and ( obj.value_amount < 0.0 or obj.value_amount > 1.0):
return False
return True
_constraints = [
(_check_percent, 'Percentages for Payment Term Line must be between 0 and 1, Example: 0.02 for 2%.', ['value_amount']),
]
class account_account_type(osv.osv):
_name = "account.account.type"
_description = "Account Type"
def _get_financial_report_ref(self, cr, uid, context=None):
obj_data = self.pool.get('ir.model.data')
obj_financial_report = self.pool.get('account.financial.report')
financial_report_ref = {}
for key, financial_report in [
('asset','account_financial_report_assets0'),
('liability','account_financial_report_liability0'),
('income','account_financial_report_income0'),
('expense','account_financial_report_expense0'),
]:
try:
financial_report_ref[key] = obj_financial_report.browse(cr, uid,
obj_data.get_object_reference(cr, uid, 'account', financial_report)[1],
context=context)
except ValueError:
pass
return financial_report_ref
def _get_current_report_type(self, cr, uid, ids, name, arg, context=None):
res = {}
financial_report_ref = self._get_financial_report_ref(cr, uid, context=context)
for record in self.browse(cr, uid, ids, context=context):
res[record.id] = 'none'
for key, financial_report in financial_report_ref.items():
list_ids = [x.id for x in financial_report.account_type_ids]
if record.id in list_ids:
res[record.id] = key
return res
def _save_report_type(self, cr, uid, account_type_id, field_name, field_value, arg, context=None):
field_value = field_value or 'none'
obj_financial_report = self.pool.get('account.financial.report')
#unlink if it exists somewhere in the financial reports related to BS or PL
financial_report_ref = self._get_financial_report_ref(cr, uid, context=context)
for key, financial_report in financial_report_ref.items():
list_ids = [x.id for x in financial_report.account_type_ids]
if account_type_id in list_ids:
obj_financial_report.write(cr, uid, [financial_report.id], {'account_type_ids': [(3, account_type_id)]})
        #write it in the right place
if field_value != 'none':
return obj_financial_report.write(cr, uid, [financial_report_ref[field_value].id], {'account_type_ids': [(4, account_type_id)]})
_columns = {
'name': fields.char('Account Type', required=True, translate=True),
'code': fields.char('Code', size=32, required=True, select=True),
'close_method': fields.selection([('none', 'None'), ('balance', 'Balance'), ('detail', 'Detail'), ('unreconciled', 'Unreconciled')], 'Deferral Method', required=True, help="""Set here the method that will be used to generate the end of year journal entries for all the accounts of this type.
'None' means that nothing will be done.
'Balance' will generally be used for cash accounts.
'Detail' will copy each existing journal item of the previous year, even the reconciled ones.
'Unreconciled' will copy only the journal items that were unreconciled on the first day of the new fiscal year."""),
'report_type': fields.function(_get_current_report_type, fnct_inv=_save_report_type, type='selection', string='P&L / BS Category', store=True,
selection= [('none','/'),
('income', _('Profit & Loss (Income account)')),
('expense', _('Profit & Loss (Expense account)')),
('asset', _('Balance Sheet (Asset account)')),
('liability', _('Balance Sheet (Liability account)'))], help="This field is used to generate legal reports: profit and loss, balance sheet.", required=True),
'note': fields.text('Description'),
}
_defaults = {
'close_method': 'none',
'report_type': 'none',
}
_order = "code"
def _code_get(self, cr, uid, context=None):
acc_type_obj = self.pool.get('account.account.type')
ids = acc_type_obj.search(cr, uid, [])
res = acc_type_obj.read(cr, uid, ids, ['code', 'name'], context=context)
return [(r['code'], r['name']) for r in res]
#----------------------------------------------------------
# Accounts
#----------------------------------------------------------
class account_account(osv.osv):
_order = "parent_left"
_parent_order = "code"
_name = "account.account"
_description = "Account"
_parent_store = True
def search(self, cr, uid, args, offset=0, limit=None, order=None,
context=None, count=False):
if context is None:
context = {}
pos = 0
while pos < len(args):
if args[pos][0] == 'code' and args[pos][1] in ('like', 'ilike') and args[pos][2]:
args[pos] = ('code', '=like', tools.ustr(args[pos][2].replace('%', ''))+'%')
if args[pos][0] == 'journal_id':
if not args[pos][2]:
del args[pos]
continue
jour = self.pool.get('account.journal').browse(cr, uid, args[pos][2], context=context)
if (not (jour.account_control_ids or jour.type_control_ids)) or not args[pos][2]:
args[pos] = ('type','not in',('consolidation','view'))
continue
ids3 = map(lambda x: x.id, jour.type_control_ids)
ids1 = super(account_account, self).search(cr, uid, [('user_type', 'in', ids3)])
ids1 += map(lambda x: x.id, jour.account_control_ids)
args[pos] = ('id', 'in', ids1)
pos += 1
if context and context.has_key('consolidate_children'): #add consolidated children of accounts
ids = super(account_account, self).search(cr, uid, args, offset, limit,
order, context=context, count=count)
for consolidate_child in self.browse(cr, uid, context['account_id'], context=context).child_consol_ids:
ids.append(consolidate_child.id)
return ids
return super(account_account, self).search(cr, uid, args, offset, limit,
order, context=context, count=count)
def _get_children_and_consol(self, cr, uid, ids, context=None):
        #this function searches for all the children and all consolidated children (recursively) of the given account ids
ids2 = self.search(cr, uid, [('parent_id', 'child_of', ids)], context=context)
ids3 = []
for rec in self.browse(cr, uid, ids2, context=context):
for child in rec.child_consol_ids:
ids3.append(child.id)
if ids3:
ids3 = self._get_children_and_consol(cr, uid, ids3, context)
return ids2 + ids3
def __compute(self, cr, uid, ids, field_names, arg=None, context=None,
query='', query_params=()):
""" compute the balance, debit and/or credit for the provided
account ids
Arguments:
`ids`: account ids
`field_names`: the fields to compute (a list of any of
'balance', 'debit' and 'credit')
`arg`: unused fields.function stuff
`query`: additional query filter (as a string)
`query_params`: parameters for the provided query string
(__compute will handle their escaping) as a
tuple
"""
mapping = {
'balance': "COALESCE(SUM(l.debit),0) - COALESCE(SUM(l.credit), 0) as balance",
'debit': "COALESCE(SUM(l.debit), 0) as debit",
'credit': "COALESCE(SUM(l.credit), 0) as credit",
# by convention, foreign_balance is 0 when the account has no secondary currency, because the amounts may be in different currencies
'foreign_balance': "(SELECT CASE WHEN currency_id IS NULL THEN 0 ELSE COALESCE(SUM(l.amount_currency), 0) END FROM account_account WHERE id IN (l.account_id)) as foreign_balance",
}
#get all the necessary accounts
children_and_consolidated = self._get_children_and_consol(cr, uid, ids, context=context)
#compute for each account the balance/debit/credit from the move lines
accounts = {}
res = {}
null_result = dict((fn, 0.0) for fn in field_names)
if children_and_consolidated:
aml_query = self.pool.get('account.move.line')._query_get(cr, uid, context=context)
wheres = [""]
if query.strip():
wheres.append(query.strip())
if aml_query.strip():
wheres.append(aml_query.strip())
filters = " AND ".join(wheres)
# IN might not work ideally in case there are too many
# children_and_consolidated, in that case join on a
# values() e.g.:
# SELECT l.account_id as id FROM account_move_line l
# INNER JOIN (VALUES (id1), (id2), (id3), ...) AS tmp (id)
# ON l.account_id = tmp.id
# or make _get_children_and_consol return a query and join on that
request = ("SELECT l.account_id as id, " +\
', '.join(mapping.values()) +
" FROM account_move_line l" \
" WHERE l.account_id IN %s " \
+ filters +
" GROUP BY l.account_id")
params = (tuple(children_and_consolidated),) + query_params
cr.execute(request, params)
for row in cr.dictfetchall():
accounts[row['id']] = row
# consolidate accounts with direct children
children_and_consolidated.reverse()
brs = list(self.browse(cr, uid, children_and_consolidated, context=context))
sums = {}
currency_obj = self.pool.get('res.currency')
while brs:
current = brs.pop(0)
# can_compute = True
# for child in current.child_id:
# if child.id not in sums:
# can_compute = False
# try:
# brs.insert(0, brs.pop(brs.index(child)))
# except ValueError:
# brs.insert(0, child)
# if can_compute:
for fn in field_names:
sums.setdefault(current.id, {})[fn] = accounts.get(current.id, {}).get(fn, 0.0)
for child in current.child_id:
if child.company_id.currency_id.id == current.company_id.currency_id.id:
sums[current.id][fn] += sums[child.id][fn]
else:
sums[current.id][fn] += currency_obj.compute(cr, uid, child.company_id.currency_id.id, current.company_id.currency_id.id, sums[child.id][fn], context=context)
                # as we have to rely on values computed above, this is calculated separately from the previous fields
if current.currency_id and current.exchange_rate and \
('adjusted_balance' in field_names or 'unrealized_gain_loss' in field_names):
# Computing Adjusted Balance and Unrealized Gains and losses
# Adjusted Balance = Foreign Balance / Exchange Rate
# Unrealized Gains and losses = Adjusted Balance - Balance
adj_bal = sums[current.id].get('foreign_balance', 0.0) / current.exchange_rate
sums[current.id].update({'adjusted_balance': adj_bal, 'unrealized_gain_loss': adj_bal - sums[current.id].get('balance', 0.0)})
for id in ids:
res[id] = sums.get(id, null_result)
else:
for id in ids:
res[id] = null_result
return res
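    # Illustrative call (hypothetical id): reading the function fields, e.g.
    # self.read(cr, uid, [account_id], ['balance', 'debit', 'credit']), triggers
    # __compute, which fetches the move lines of the account and of all its
    # children/consolidated children in one grouped SQL query and then sums the
    # children bottom-up, converting amounts when a child company uses another currency.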
def _get_company_currency(self, cr, uid, ids, field_name, arg, context=None):
result = {}
for rec in self.browse(cr, uid, ids, context=context):
result[rec.id] = (rec.company_id.currency_id.id,rec.company_id.currency_id.symbol)
return result
def _get_child_ids(self, cr, uid, ids, field_name, arg, context=None):
result = {}
for record in self.browse(cr, uid, ids, context=context):
if record.child_parent_ids:
result[record.id] = [x.id for x in record.child_parent_ids]
else:
result[record.id] = []
if record.child_consol_ids:
for acc in record.child_consol_ids:
if acc.id not in result[record.id]:
result[record.id].append(acc.id)
return result
def _get_level(self, cr, uid, ids, field_name, arg, context=None):
res = {}
for account in self.browse(cr, uid, ids, context=context):
#we may not know the level of the parent at the time of computation, so we
# can't simply do res[account.id] = account.parent_id.level + 1
level = 0
parent = account.parent_id
while parent:
level += 1
parent = parent.parent_id
res[account.id] = level
return res
def _set_credit_debit(self, cr, uid, account_id, name, value, arg, context=None):
if context.get('config_invisible', True):
return True
account = self.browse(cr, uid, account_id, context=context)
diff = value - getattr(account,name)
if not diff:
return True
journal_obj = self.pool.get('account.journal')
jids = journal_obj.search(cr, uid, [('type','=','situation'),('centralisation','=',1),('company_id','=',account.company_id.id)], context=context)
if not jids:
raise osv.except_osv(_('Error!'),_("You need an Opening journal with centralisation checked to set the initial balance."))
period_obj = self.pool.get('account.period')
pids = period_obj.search(cr, uid, [('special','=',True),('company_id','=',account.company_id.id)], context=context)
if not pids:
raise osv.except_osv(_('Error!'),_("There is no opening/closing period defined, please create one to set the initial balance."))
move_obj = self.pool.get('account.move.line')
move_id = move_obj.search(cr, uid, [
('journal_id','=',jids[0]),
('period_id','=',pids[0]),
('account_id','=', account_id),
(name,'>', 0.0),
('name','=', _('Opening Balance'))
], context=context)
if move_id:
move = move_obj.browse(cr, uid, move_id[0], context=context)
move_obj.write(cr, uid, move_id[0], {
name: diff+getattr(move,name)
}, context=context)
else:
if diff<0.0:
raise osv.except_osv(_('Error!'),_("Unable to adapt the initial balance (negative value)."))
nameinv = (name=='credit' and 'debit') or 'credit'
move_id = move_obj.create(cr, uid, {
'name': _('Opening Balance'),
'account_id': account_id,
'journal_id': jids[0],
'period_id': pids[0],
name: diff,
nameinv: 0.0
}, context=context)
return True
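    # Illustrative effect (hypothetical figures): writing debit=500.0 on an account
    # whose debit is currently 300.0 (all of it coming from its 'Opening Balance'
    # line) looks up the company's centralised 'situation' journal and special
    # period, then adds the 200.0 difference to that opening move line (or creates
    # a new one when none exists yet).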
_columns = {
'name': fields.char('Name', required=True, select=True),
'currency_id': fields.many2one('res.currency', 'Secondary Currency', help="Forces all moves for this account to have this secondary currency."),
'code': fields.char('Code', size=64, required=True, select=1),
'type': fields.selection([
('view', 'View'),
('other', 'Regular'),
('receivable', 'Receivable'),
('payable', 'Payable'),
('liquidity','Liquidity'),
('consolidation', 'Consolidation'),
('closed', 'Closed'),
], 'Internal Type', required=True, help="The 'Internal Type' is used for features available on "\
"different types of accounts: view can not have journal items, consolidation are accounts that "\
"can have children accounts for multi-company consolidations, payable/receivable are for "\
"partners accounts (for debit/credit computations), closed for depreciated accounts."),
'user_type': fields.many2one('account.account.type', 'Account Type', required=True,
help="Account Type is used for information purpose, to generate "
"country-specific legal reports, and set the rules to close a fiscal year and generate opening entries."),
'financial_report_ids': fields.many2many('account.financial.report', 'account_account_financial_report', 'account_id', 'report_line_id', 'Financial Reports'),
'parent_id': fields.many2one('account.account', 'Parent', ondelete='cascade', domain=[('type','=','view')]),
'child_parent_ids': fields.one2many('account.account','parent_id','Children'),
'child_consol_ids': fields.many2many('account.account', 'account_account_consol_rel', 'child_id', 'parent_id', 'Consolidated Children'),
'child_id': fields.function(_get_child_ids, type='many2many', relation="account.account", string="Child Accounts"),
'balance': fields.function(__compute, digits_compute=dp.get_precision('Account'), string='Balance', multi='balance'),
'credit': fields.function(__compute, fnct_inv=_set_credit_debit, digits_compute=dp.get_precision('Account'), string='Credit', multi='balance'),
'debit': fields.function(__compute, fnct_inv=_set_credit_debit, digits_compute=dp.get_precision('Account'), string='Debit', multi='balance'),
'foreign_balance': fields.function(__compute, digits_compute=dp.get_precision('Account'), string='Foreign Balance', multi='balance',
help="Total amount (in Secondary currency) for transactions held in secondary currency for this account."),
'adjusted_balance': fields.function(__compute, digits_compute=dp.get_precision('Account'), string='Adjusted Balance', multi='balance',
help="Total amount (in Company currency) for transactions held in secondary currency for this account."),
'unrealized_gain_loss': fields.function(__compute, digits_compute=dp.get_precision('Account'), string='Unrealized Gain or Loss', multi='balance',
help="Value of Loss or Gain due to changes in exchange rate when doing multi-currency transactions."),
'reconcile': fields.boolean('Allow Reconciliation', help="Check this box if this account allows reconciliation of journal items."),
'exchange_rate': fields.related('currency_id', 'rate', type='float', string='Exchange Rate', digits=(12,6)),
'shortcut': fields.char('Shortcut', size=12),
'tax_ids': fields.many2many('account.tax', 'account_account_tax_default_rel',
'account_id', 'tax_id', 'Default Taxes'),
'note': fields.text('Internal Notes'),
'company_currency_id': fields.function(_get_company_currency, type='many2one', relation='res.currency', string='Company Currency'),
'company_id': fields.many2one('res.company', 'Company', required=True),
'active': fields.boolean('Active', select=2, help="If the active field is set to False, it will allow you to hide the account without removing it."),
'parent_left': fields.integer('Parent Left', select=1),
'parent_right': fields.integer('Parent Right', select=1),
'currency_mode': fields.selection([('current', 'At Date'), ('average', 'Average Rate')], 'Outgoing Currencies Rate',
help=
'This will select how the current currency rate for outgoing transactions is computed. '\
'In most countries the legal method is "average" but only a few software systems are able to '\
'manage this. So if you import from another software system you may have to use the rate at date. ' \
'Incoming transactions always use the rate at date.', \
required=True),
'level': fields.function(_get_level, string='Level', method=True, type='integer',
store={
'account.account': (_get_children_and_consol, ['level', 'parent_id'], 10),
}),
}
_defaults = {
'type': 'other',
'reconcile': False,
'active': True,
'currency_mode': 'current',
'company_id': lambda s, cr, uid, c: s.pool.get('res.company')._company_default_get(cr, uid, 'account.account', context=c),
}
def _check_recursion(self, cr, uid, ids, context=None):
obj_self = self.browse(cr, uid, ids[0], context=context)
p_id = obj_self.parent_id and obj_self.parent_id.id
if (obj_self in obj_self.child_consol_ids) or (p_id and (p_id is obj_self.id)):
return False
while(ids):
cr.execute('SELECT DISTINCT child_id '\
'FROM account_account_consol_rel '\
'WHERE parent_id IN %s', (tuple(ids),))
child_ids = map(itemgetter(0), cr.fetchall())
c_ids = child_ids
if (p_id and (p_id in c_ids)) or (obj_self.id in c_ids):
return False
while len(c_ids):
s_ids = self.search(cr, uid, [('parent_id', 'in', c_ids)])
if p_id and (p_id in s_ids):
return False
c_ids = s_ids
ids = child_ids
return True
def _check_type(self, cr, uid, ids, context=None):
if context is None:
context = {}
accounts = self.browse(cr, uid, ids, context=context)
for account in accounts:
if account.child_id and account.type not in ('view', 'consolidation'):
return False
return True
def _check_account_type(self, cr, uid, ids, context=None):
for account in self.browse(cr, uid, ids, context=context):
if account.type in ('receivable', 'payable') and account.user_type.close_method != 'unreconciled':
return False
return True
def _check_company_account(self, cr, uid, ids, context=None):
for account in self.browse(cr, uid, ids, context=context):
if account.parent_id:
if account.company_id != account.parent_id.company_id:
return False
return True
_constraints = [
(_check_recursion, 'Error!\nYou cannot create recursive accounts.', ['parent_id']),
(_check_type, 'Configuration Error!\nYou cannot define children to an account with internal type different of "View".', ['type']),
(_check_account_type, 'Configuration Error!\nYou cannot select an account type with a deferral method different of "Unreconciled" for accounts with internal type "Payable/Receivable".', ['user_type','type']),
(_check_company_account, 'Error!\nYou cannot create an account which has parent account of different company.', ['parent_id']),
]
_sql_constraints = [
('code_company_uniq', 'unique (code,company_id)', 'The code of the account must be unique per company !')
]
def name_search(self, cr, user, name, args=None, operator='ilike', context=None, limit=100):
if not args:
args = []
args = args[:]
ids = []
try:
if name and str(name).startswith('partner:'):
part_id = int(name.split(':')[1])
part = self.pool.get('res.partner').browse(cr, user, part_id, context=context)
args += [('id', 'in', (part.property_account_payable.id, part.property_account_receivable.id))]
name = False
if name and str(name).startswith('type:'):
type = name.split(':')[1]
args += [('type', '=', type)]
name = False
except:
pass
if name:
if operator not in expression.NEGATIVE_TERM_OPERATORS:
plus_percent = lambda n: n+'%'
code_op, code_conv = {
'ilike': ('=ilike', plus_percent),
'like': ('=like', plus_percent),
}.get(operator, (operator, lambda n: n))
ids = self.search(cr, user, ['|', ('code', code_op, code_conv(name)), '|', ('shortcut', '=', name), ('name', operator, name)]+args, limit=limit)
if not ids and len(name.split()) >= 2:
#Separating code and name of account for searching
operand1,operand2 = name.split(' ',1) #name can contain spaces e.g. OpenERP S.A.
ids = self.search(cr, user, [('code', operator, operand1), ('name', operator, operand2)]+ args, limit=limit)
else:
ids = self.search(cr, user, ['&','!', ('code', '=like', name+"%"), ('name', operator, name)]+args, limit=limit)
                # as negative operators are meant to restrict, only refine if we already have results
if ids and len(name.split()) >= 2:
operand1,operand2 = name.split(' ',1) #name can contain spaces e.g. OpenERP S.A.
ids = self.search(cr, user, [('code', operator, operand1), ('name', operator, operand2), ('id', 'in', ids)]+ args, limit=limit)
else:
ids = self.search(cr, user, args, context=context, limit=limit)
return self.name_get(cr, user, ids, context=context)
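    # Illustrative searches (hypothetical values): name_search(cr, uid, 'partner:42')
    # restricts the candidates to the payable/receivable accounts of partner id 42,
    # 'type:receivable' filters on the internal type, and a plain two-word string
    # such as '4010 Sales' is first tried as a whole against code, shortcut and
    # name, then split so that '4010' matches the code and 'Sales' the name.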
def name_get(self, cr, uid, ids, context=None):
if not ids:
return []
if isinstance(ids, (int, long)):
ids = [ids]
reads = self.read(cr, uid, ids, ['name', 'code'], context=context)
res = []
for record in reads:
name = record['name']
if record['code']:
name = record['code'] + ' ' + name
res.append((record['id'], name))
return res
def copy(self, cr, uid, id, default=None, context=None, done_list=None, local=False):
default = {} if default is None else default.copy()
if done_list is None:
done_list = []
account = self.browse(cr, uid, id, context=context)
new_child_ids = []
default.update(code=_("%s (copy)") % (account['code'] or ''))
if not local:
done_list = []
if account.id in done_list:
return False
done_list.append(account.id)
if account:
for child in account.child_id:
child_ids = self.copy(cr, uid, child.id, default, context=context, done_list=done_list, local=True)
if child_ids:
new_child_ids.append(child_ids)
default['child_parent_ids'] = [(6, 0, new_child_ids)]
else:
default['child_parent_ids'] = False
return super(account_account, self).copy(cr, uid, id, default, context=context)
def _check_moves(self, cr, uid, ids, method, context=None):
line_obj = self.pool.get('account.move.line')
account_ids = self.search(cr, uid, [('id', 'child_of', ids)], context=context)
if line_obj.search(cr, uid, [('account_id', 'in', account_ids)], context=context):
if method == 'write':
raise osv.except_osv(_('Error!'), _('You cannot deactivate an account that contains journal items.'))
elif method == 'unlink':
raise osv.except_osv(_('Error!'), _('You cannot remove an account that contains journal items.'))
#Checking whether the account is set as a property to any Partner or not
values = ['account.account,%s' % (account_id,) for account_id in ids]
partner_prop_acc = self.pool.get('ir.property').search(cr, uid, [('value_reference','in', values)], context=context)
if partner_prop_acc:
raise osv.except_osv(_('Warning!'), _('You cannot remove/deactivate an account which is set on a customer or supplier.'))
return True
def _check_allow_type_change(self, cr, uid, ids, new_type, context=None):
restricted_groups = ['consolidation','view']
line_obj = self.pool.get('account.move.line')
for account in self.browse(cr, uid, ids, context=context):
old_type = account.type
account_ids = self.search(cr, uid, [('id', 'child_of', [account.id])])
if line_obj.search(cr, uid, [('account_id', 'in', account_ids)]):
#Check for 'Closed' type
if old_type == 'closed' and new_type !='closed':
raise osv.except_osv(_('Warning!'), _("You cannot change the type of account from 'Closed' to any other type as it contains journal items!"))
# Forbid to change an account type for restricted_groups as it contains journal items (or if one of its children does)
if (new_type in restricted_groups):
raise osv.except_osv(_('Warning!'), _("You cannot change the type of account to '%s' type as it contains journal items!") % (new_type,))
return True
    # For legal reasons (it is forbidden to modify journal entries which belong to a closed fiscal year or period),
    # forbid modifying the code of an account if journal entries have already been posted on this account. This cannot
    # simply be made 'configurable' since it could lead to a lack of confidence in Odoo, which is what we want to avoid.
def _check_allow_code_change(self, cr, uid, ids, context=None):
line_obj = self.pool.get('account.move.line')
for account in self.browse(cr, uid, ids, context=context):
account_ids = self.search(cr, uid, [('id', 'child_of', [account.id])], context=context)
if line_obj.search(cr, uid, [('account_id', 'in', account_ids)], context=context):
raise osv.except_osv(_('Warning !'), _("You cannot change the code of account which contains journal items!"))
return True
def write(self, cr, uid, ids, vals, context=None):
if context is None:
context = {}
if not ids:
return True
if isinstance(ids, (int, long)):
ids = [ids]
# Dont allow changing the company_id when account_move_line already exist
if 'company_id' in vals:
move_lines = self.pool.get('account.move.line').search(cr, uid, [('account_id', 'in', ids)], context=context)
if move_lines:
# Allow the write if the value is the same
for i in [i['company_id'][0] for i in self.read(cr,uid,ids,['company_id'], context=context)]:
if vals['company_id']!=i:
raise osv.except_osv(_('Warning!'), _('You cannot change the owner company of an account that already contains journal items.'))
if 'active' in vals and not vals['active']:
self._check_moves(cr, uid, ids, "write", context=context)
if 'type' in vals.keys():
self._check_allow_type_change(cr, uid, ids, vals['type'], context=context)
if 'code' in vals.keys():
self._check_allow_code_change(cr, uid, ids, context=context)
return super(account_account, self).write(cr, uid, ids, vals, context=context)
def unlink(self, cr, uid, ids, context=None):
self._check_moves(cr, uid, ids, "unlink", context=context)
return super(account_account, self).unlink(cr, uid, ids, context=context)
class account_journal(osv.osv):
_name = "account.journal"
_description = "Journal"
_columns = {
        'with_last_closing_balance': fields.boolean('Opening With Last Closing Balance', help="For cash or bank journals, this option should be unchecked when the starting balance should always be set to 0 for new documents."),
'name': fields.char('Journal Name', required=True),
'code': fields.char('Code', size=5, required=True, help="The code will be displayed on reports."),
'type': fields.selection([('sale', 'Sale'),('sale_refund','Sale Refund'), ('purchase', 'Purchase'), ('purchase_refund','Purchase Refund'), ('cash', 'Cash'), ('bank', 'Bank and Checks'), ('general', 'General'), ('situation', 'Opening/Closing Situation')], 'Type', size=32, required=True,
help="Select 'Sale' for customer invoices journals."\
" Select 'Purchase' for supplier invoices journals."\
" Select 'Cash' or 'Bank' for journals that are used in customer or supplier payments."\
" Select 'General' for miscellaneous operations journals."\
" Select 'Opening/Closing Situation' for entries generated for new fiscal years."),
'type_control_ids': fields.many2many('account.account.type', 'account_journal_type_rel', 'journal_id','type_id', 'Type Controls', domain=[('code','<>','view'), ('code', '<>', 'closed')]),
'account_control_ids': fields.many2many('account.account', 'account_account_type_rel', 'journal_id','account_id', 'Account', domain=[('type','<>','view'), ('type', '<>', 'closed')]),
'default_credit_account_id': fields.many2one('account.account', 'Default Credit Account', domain="[('type','!=','view')]", help="It acts as a default account for credit amount"),
'default_debit_account_id': fields.many2one('account.account', 'Default Debit Account', domain="[('type','!=','view')]", help="It acts as a default account for debit amount"),
'centralisation': fields.boolean('Centralized Counterpart', help="Check this box to determine that each entry of this journal won't create a new counterpart but will share the same counterpart. This is used in fiscal year closing."),
'update_posted': fields.boolean('Allow Cancelling Entries', help="Check this box if you want to allow the cancellation the entries related to this journal or of the invoice related to this journal"),
'group_invoice_lines': fields.boolean('Group Invoice Lines', help="If this box is checked, the system will try to group the accounting lines when generating them from invoices."),
'sequence_id': fields.many2one('ir.sequence', 'Entry Sequence', help="This field contains the information related to the numbering of the journal entries of this journal.", required=True, copy=False),
'user_id': fields.many2one('res.users', 'User', help="The user responsible for this journal"),
'groups_id': fields.many2many('res.groups', 'account_journal_group_rel', 'journal_id', 'group_id', 'Groups'),
'currency': fields.many2one('res.currency', 'Currency', help='The currency used to enter statement'),
'entry_posted': fields.boolean('Autopost Created Moves', help='Check this box to automatically post entries of this journal. Note that legally, some entries may be automatically posted when the source document is validated (Invoices), whatever the status of this field.'),
'company_id': fields.many2one('res.company', 'Company', required=True, select=1, help="Company related to this journal"),
'allow_date':fields.boolean('Check Date in Period', help= 'If checked, the entry won\'t be created if the entry date is not included into the selected period'),
'profit_account_id' : fields.many2one('account.account', 'Profit Account'),
'loss_account_id' : fields.many2one('account.account', 'Loss Account'),
'internal_account_id' : fields.many2one('account.account', 'Internal Transfers Account', select=1),
        'cash_control' : fields.boolean('Cash Control', help='If you want the journal to be controlled at opening/closing, check this option'),
}
_defaults = {
'cash_control' : False,
'with_last_closing_balance' : True,
'user_id': lambda self, cr, uid, context: uid,
'company_id': lambda self, cr, uid, c: self.pool.get('res.users').browse(cr, uid, uid, c).company_id.id,
}
_sql_constraints = [
('code_company_uniq', 'unique (code, company_id)', 'The code of the journal must be unique per company !'),
('name_company_uniq', 'unique (name, company_id)', 'The name of the journal must be unique per company !'),
]
_order = 'code'
def _check_currency(self, cr, uid, ids, context=None):
for journal in self.browse(cr, uid, ids, context=context):
if journal.currency:
if journal.default_credit_account_id and not journal.default_credit_account_id.currency_id.id == journal.currency.id:
return False
if journal.default_debit_account_id and not journal.default_debit_account_id.currency_id.id == journal.currency.id:
return False
return True
_constraints = [
(_check_currency, 'Configuration error!\nThe currency chosen should be shared by the default accounts too.', ['currency','default_debit_account_id','default_credit_account_id']),
]
def copy(self, cr, uid, id, default=None, context=None):
default = dict(context or {})
journal = self.browse(cr, uid, id, context=context)
default.update(
code=_("%s (copy)") % (journal['code'] or ''),
name=_("%s (copy)") % (journal['name'] or ''))
return super(account_journal, self).copy(cr, uid, id, default, context=context)
def write(self, cr, uid, ids, vals, context=None):
if context is None:
context = {}
if isinstance(ids, (int, long)):
ids = [ids]
for journal in self.browse(cr, uid, ids, context=context):
if 'company_id' in vals and journal.company_id.id != vals['company_id']:
move_lines = self.pool.get('account.move.line').search(cr, uid, [('journal_id', 'in', ids)])
if move_lines:
raise osv.except_osv(_('Warning!'), _('This journal already contains items, therefore you cannot modify its company field.'))
return super(account_journal, self).write(cr, uid, ids, vals, context=context)
    def create_sequence(self, cr, uid, vals, context=None):
        """ Create new no_gap entry sequence for every new Journal
        """
# in account.journal code is actually the prefix of the sequence
# whereas ir.sequence code is a key to lookup global sequences.
prefix = vals['code'].upper()
seq = {
'name': vals['name'],
'implementation':'no_gap',
'prefix': prefix + "/%(year)s/",
'padding': 4,
'number_increment': 1
}
if 'company_id' in vals:
seq['company_id'] = vals['company_id']
return self.pool.get('ir.sequence').create(cr, uid, seq)
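    # Example (illustrative): a journal created with code 'BNK1' gets an ir.sequence
    # with prefix 'BNK1/%(year)s/', padding 4 and the 'no_gap' implementation, so
    # its first entry of 2014 would be numbered 'BNK1/2014/0001'.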
def create(self, cr, uid, vals, context=None):
if not 'sequence_id' in vals or not vals['sequence_id']:
# if we have the right to create a journal, we should be able to
            # create its sequence.
vals.update({'sequence_id': self.create_sequence(cr, SUPERUSER_ID, vals, context)})
return super(account_journal, self).create(cr, uid, vals, context)
def name_get(self, cr, user, ids, context=None):
"""
Returns a list of tupples containing id, name.
result format: {[(id, name), (id, name), ...]}
@param cr: A database cursor
@param user: ID of the user currently logged in
@param ids: list of ids for which name should be read
@param context: context arguments, like lang, time zone
@return: Returns a list of tupples containing id, name
"""
if not ids:
return []
if isinstance(ids, (int, long)):
ids = [ids]
result = self.browse(cr, user, ids, context=context)
res = []
for rs in result:
if rs.currency:
currency = rs.currency
else:
currency = rs.company_id.currency_id
name = "%s (%s)" % (rs.name, currency.name)
res += [(rs.id, name)]
return res
def name_search(self, cr, user, name, args=None, operator='ilike', context=None, limit=100):
if not args:
args = []
if operator in expression.NEGATIVE_TERM_OPERATORS:
domain = [('code', operator, name), ('name', operator, name)]
else:
domain = ['|', ('code', operator, name), ('name', operator, name)]
ids = self.search(cr, user, expression.AND([domain, args]), limit=limit, context=context)
return self.name_get(cr, user, ids, context=context)
class account_fiscalyear(osv.osv):
_name = "account.fiscalyear"
_description = "Fiscal Year"
_columns = {
'name': fields.char('Fiscal Year', required=True),
'code': fields.char('Code', size=6, required=True),
'company_id': fields.many2one('res.company', 'Company', required=True),
'date_start': fields.date('Start Date', required=True),
'date_stop': fields.date('End Date', required=True),
'period_ids': fields.one2many('account.period', 'fiscalyear_id', 'Periods'),
'state': fields.selection([('draft','Open'), ('done','Closed')], 'Status', readonly=True, copy=False),
'end_journal_period_id': fields.many2one(
'account.journal.period', 'End of Year Entries Journal',
readonly=True, copy=False),
}
_defaults = {
'state': 'draft',
'company_id': lambda self,cr,uid,c: self.pool.get('res.users').browse(cr, uid, uid, c).company_id.id,
}
_order = "date_start, id"
def _check_duration(self, cr, uid, ids, context=None):
obj_fy = self.browse(cr, uid, ids[0], context=context)
if obj_fy.date_stop < obj_fy.date_start:
return False
return True
_constraints = [
(_check_duration, 'Error!\nThe start date of a fiscal year must precede its end date.', ['date_start','date_stop'])
]
def create_period3(self, cr, uid, ids, context=None):
return self.create_period(cr, uid, ids, context, 3)
def create_period(self, cr, uid, ids, context=None, interval=1):
period_obj = self.pool.get('account.period')
for fy in self.browse(cr, uid, ids, context=context):
ds = datetime.strptime(fy.date_start, '%Y-%m-%d')
period_obj.create(cr, uid, {
'name': "%s %s" % (_('Opening Period'), ds.strftime('%Y')),
'code': ds.strftime('00/%Y'),
'date_start': ds,
'date_stop': ds,
'special': True,
'fiscalyear_id': fy.id,
})
while ds.strftime('%Y-%m-%d') < fy.date_stop:
de = ds + relativedelta(months=interval, days=-1)
if de.strftime('%Y-%m-%d') > fy.date_stop:
de = datetime.strptime(fy.date_stop, '%Y-%m-%d')
period_obj.create(cr, uid, {
'name': ds.strftime('%m/%Y'),
'code': ds.strftime('%m/%Y'),
'date_start': ds.strftime('%Y-%m-%d'),
'date_stop': de.strftime('%Y-%m-%d'),
'fiscalyear_id': fy.id,
})
ds = ds + relativedelta(months=interval)
return True
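    # Illustrative result: for a fiscal year running from 2014-01-01 to 2014-12-31
    # this creates one special opening period coded '00/2014' plus twelve monthly
    # periods '01/2014' .. '12/2014' (or four three-month periods when called
    # through create_period3).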
def find(self, cr, uid, dt=None, exception=True, context=None):
res = self.finds(cr, uid, dt, exception, context=context)
return res and res[0] or False
def finds(self, cr, uid, dt=None, exception=True, context=None):
if context is None: context = {}
if not dt:
dt = fields.date.context_today(self,cr,uid,context=context)
args = [('date_start', '<=' ,dt), ('date_stop', '>=', dt)]
if context.get('company_id', False):
company_id = context['company_id']
else:
company_id = self.pool.get('res.users').browse(cr, uid, uid, context=context).company_id.id
args.append(('company_id', '=', company_id))
ids = self.search(cr, uid, args, context=context)
if not ids:
if exception:
model, action_id = self.pool['ir.model.data'].get_object_reference(cr, uid, 'account', 'action_account_fiscalyear')
msg = _('There is no period defined for this date: %s.\nPlease go to Configuration/Periods and configure a fiscal year.') % dt
raise openerp.exceptions.RedirectWarning(msg, action_id, _('Go to the configuration panel'))
else:
return []
return ids
def name_search(self, cr, user, name, args=None, operator='ilike', context=None, limit=80):
if args is None:
args = []
if operator in expression.NEGATIVE_TERM_OPERATORS:
domain = [('code', operator, name), ('name', operator, name)]
else:
domain = ['|', ('code', operator, name), ('name', operator, name)]
ids = self.search(cr, user, expression.AND([domain, args]), limit=limit, context=context)
return self.name_get(cr, user, ids, context=context)
class account_period(osv.osv):
_name = "account.period"
_description = "Account period"
_columns = {
'name': fields.char('Period Name', required=True),
'code': fields.char('Code', size=12),
'special': fields.boolean('Opening/Closing Period',help="These periods can overlap."),
'date_start': fields.date('Start of Period', required=True, states={'done':[('readonly',True)]}),
'date_stop': fields.date('End of Period', required=True, states={'done':[('readonly',True)]}),
'fiscalyear_id': fields.many2one('account.fiscalyear', 'Fiscal Year', required=True, states={'done':[('readonly',True)]}, select=True),
'state': fields.selection([('draft','Open'), ('done','Closed')], 'Status', readonly=True, copy=False,
help='When monthly periods are created. The status is \'Draft\'. At the end of monthly period it is in \'Done\' status.'),
'company_id': fields.related('fiscalyear_id', 'company_id', type='many2one', relation='res.company', string='Company', store=True, readonly=True)
}
_defaults = {
'state': 'draft',
}
_order = "date_start, special desc"
_sql_constraints = [
('name_company_uniq', 'unique(name, company_id)', 'The name of the period must be unique per company!'),
]
def _check_duration(self,cr,uid,ids,context=None):
obj_period = self.browse(cr, uid, ids[0], context=context)
if obj_period.date_stop < obj_period.date_start:
return False
return True
def _check_year_limit(self,cr,uid,ids,context=None):
for obj_period in self.browse(cr, uid, ids, context=context):
if obj_period.special:
continue
if obj_period.fiscalyear_id.date_stop < obj_period.date_stop or \
obj_period.fiscalyear_id.date_stop < obj_period.date_start or \
obj_period.fiscalyear_id.date_start > obj_period.date_start or \
obj_period.fiscalyear_id.date_start > obj_period.date_stop:
return False
pids = self.search(cr, uid, [('date_stop','>=',obj_period.date_start),('date_start','<=',obj_period.date_stop),('special','=',False),('id','<>',obj_period.id)])
for period in self.browse(cr, uid, pids):
if period.fiscalyear_id.company_id.id==obj_period.fiscalyear_id.company_id.id:
return False
return True
_constraints = [
(_check_duration, 'Error!\nThe duration of the Period(s) is/are invalid.', ['date_stop']),
(_check_year_limit, 'Error!\nThe period is invalid. Either some periods are overlapping or the period\'s dates are not matching the scope of the fiscal year.', ['date_stop'])
]
@api.returns('self')
def next(self, cr, uid, period, step, context=None):
ids = self.search(cr, uid, [('date_start','>',period.date_start)])
if len(ids)>=step:
return ids[step-1]
return False
@api.returns('self')
def find(self, cr, uid, dt=None, context=None):
if context is None: context = {}
if not dt:
dt = fields.date.context_today(self, cr, uid, context=context)
args = [('date_start', '<=' ,dt), ('date_stop', '>=', dt)]
if context.get('company_id', False):
args.append(('company_id', '=', context['company_id']))
else:
company_id = self.pool.get('res.users').browse(cr, uid, uid, context=context).company_id.id
args.append(('company_id', '=', company_id))
result = []
if context.get('account_period_prefer_normal', True):
# look for non-special periods first, and fallback to all if no result is found
result = self.search(cr, uid, args + [('special', '=', False)], context=context)
if not result:
result = self.search(cr, uid, args, context=context)
if not result:
model, action_id = self.pool['ir.model.data'].get_object_reference(cr, uid, 'account', 'action_account_period')
msg = _('There is no period defined for this date: %s.\nPlease go to Configuration/Periods.') % dt
raise openerp.exceptions.RedirectWarning(msg, action_id, _('Go to the configuration panel'))
return result
def action_draft(self, cr, uid, ids, context=None):
mode = 'draft'
for period in self.browse(cr, uid, ids):
if period.fiscalyear_id.state == 'done':
raise osv.except_osv(_('Warning!'), _('You can not re-open a period which belongs to closed fiscal year'))
cr.execute('update account_journal_period set state=%s where period_id in %s', (mode, tuple(ids),))
cr.execute('update account_period set state=%s where id in %s', (mode, tuple(ids),))
self.invalidate_cache(cr, uid, context=context)
return True
def name_search(self, cr, user, name, args=None, operator='ilike', context=None, limit=100):
if args is None:
args = []
if operator in expression.NEGATIVE_TERM_OPERATORS:
domain = [('code', operator, name), ('name', operator, name)]
else:
domain = ['|', ('code', operator, name), ('name', operator, name)]
ids = self.search(cr, user, expression.AND([domain, args]), limit=limit, context=context)
return self.name_get(cr, user, ids, context=context)
def write(self, cr, uid, ids, vals, context=None):
if 'company_id' in vals:
move_lines = self.pool.get('account.move.line').search(cr, uid, [('period_id', 'in', ids)])
if move_lines:
raise osv.except_osv(_('Warning!'), _('This journal already contains items for this period, therefore you cannot modify its company field.'))
return super(account_period, self).write(cr, uid, ids, vals, context=context)
def build_ctx_periods(self, cr, uid, period_from_id, period_to_id):
if period_from_id == period_to_id:
return [period_from_id]
period_from = self.browse(cr, uid, period_from_id)
period_date_start = period_from.date_start
company1_id = period_from.company_id.id
period_to = self.browse(cr, uid, period_to_id)
period_date_stop = period_to.date_stop
company2_id = period_to.company_id.id
if company1_id != company2_id:
raise osv.except_osv(_('Error!'), _('You should choose the periods that belong to the same company.'))
if period_date_start > period_date_stop:
            raise osv.except_osv(_('Error!'), _('The start period should precede the end period.'))
# /!\ We do not include a criterion on the company_id field below, to allow producing consolidated reports
# on multiple companies. It will only work when start/end periods are selected and no fiscal year is chosen.
        # If period_from is January, we want to exclude the opening period (it shares the same date_start), so we check whether period_from is special to decide whether to include that clause in the search.
if period_from.special:
return self.search(cr, uid, [('date_start', '>=', period_date_start), ('date_stop', '<=', period_date_stop)])
return self.search(cr, uid, [('date_start', '>=', period_date_start), ('date_stop', '<=', period_date_stop), ('special', '=', False)])
class account_journal_period(osv.osv):
_name = "account.journal.period"
_description = "Journal Period"
def _icon_get(self, cr, uid, ids, field_name, arg=None, context=None):
result = {}.fromkeys(ids, 'STOCK_NEW')
for r in self.read(cr, uid, ids, ['state']):
result[r['id']] = {
'draft': 'STOCK_NEW',
'printed': 'STOCK_PRINT_PREVIEW',
'done': 'STOCK_DIALOG_AUTHENTICATION',
}.get(r['state'], 'STOCK_NEW')
return result
_columns = {
'name': fields.char('Journal-Period Name', required=True),
'journal_id': fields.many2one('account.journal', 'Journal', required=True, ondelete="cascade"),
'period_id': fields.many2one('account.period', 'Period', required=True, ondelete="cascade"),
'icon': fields.function(_icon_get, string='Icon', type='char'),
'active': fields.boolean('Active', help="If the active field is set to False, it will allow you to hide the journal period without removing it."),
'state': fields.selection([('draft','Draft'), ('printed','Printed'), ('done','Done')], 'Status', required=True, readonly=True,
            help='When the journal period is created, its status is \'Draft\'. Once a report has been printed, it moves to \'Printed\' status. When all transactions are done, it is set to \'Done\' status.'),
'fiscalyear_id': fields.related('period_id', 'fiscalyear_id', string='Fiscal Year', type='many2one', relation='account.fiscalyear'),
'company_id': fields.related('journal_id', 'company_id', type='many2one', relation='res.company', string='Company', store=True, readonly=True)
}
def _check(self, cr, uid, ids, context=None):
for obj in self.browse(cr, uid, ids, context=context):
cr.execute('select * from account_move_line where journal_id=%s and period_id=%s limit 1', (obj.journal_id.id, obj.period_id.id))
res = cr.fetchall()
if res:
raise osv.except_osv(_('Error!'), _('You cannot modify/delete a journal with entries for this period.'))
return True
def write(self, cr, uid, ids, vals, context=None):
self._check(cr, uid, ids, context=context)
return super(account_journal_period, self).write(cr, uid, ids, vals, context=context)
def create(self, cr, uid, vals, context=None):
period_id = vals.get('period_id',False)
if period_id:
period = self.pool.get('account.period').browse(cr, uid, period_id, context=context)
vals['state']=period.state
return super(account_journal_period, self).create(cr, uid, vals, context)
def unlink(self, cr, uid, ids, context=None):
self._check(cr, uid, ids, context=context)
return super(account_journal_period, self).unlink(cr, uid, ids, context=context)
_defaults = {
'state': 'draft',
'active': True,
}
_order = "period_id"
#----------------------------------------------------------
# Entries
#----------------------------------------------------------
class account_move(osv.osv):
_name = "account.move"
_description = "Account Entry"
_order = 'id desc'
def account_assert_balanced(self, cr, uid, context=None):
cr.execute("""\
SELECT move_id
FROM account_move_line
WHERE state = 'valid'
GROUP BY move_id
HAVING abs(sum(debit) - sum(credit)) > 0.00001
""")
        assert len(cr.fetchall()) == 0, \
            "Every move whose journal items are in the 'valid' state must " \
            "have its total debits equal to its total credits"
return True
def account_move_prepare(self, cr, uid, journal_id, date=False, ref='', company_id=False, context=None):
'''
Prepares and returns a dictionary of values, ready to be passed to create() based on the parameters received.
'''
if not date:
date = fields.date.today()
period_obj = self.pool.get('account.period')
if not company_id:
user = self.pool.get('res.users').browse(cr, uid, uid, context=context)
company_id = user.company_id.id
if context is None:
context = {}
        # put the company in the context so that the right period is found
ctx = context.copy()
ctx.update({'company_id': company_id})
return {
'journal_id': journal_id,
'date': date,
'period_id': period_obj.find(cr, uid, date, context=ctx)[0],
'ref': ref,
'company_id': company_id,
}
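    # Minimal usage sketch, assuming a server-side context where ``cr``/``uid``
    # and the registry are available and ``journal_id`` points to an existing
    # journal (the date and reference below are purely illustrative):
    #
    #     move_obj = self.pool.get('account.move')
    #     vals = move_obj.account_move_prepare(cr, uid, journal_id,
    #                                          date='2014-01-31', ref='MISC/001')
    #     move_id = move_obj.create(cr, uid, vals)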
def name_get(self, cursor, user, ids, context=None):
if isinstance(ids, (int, long)):
ids = [ids]
if not ids:
return []
res = []
data_move = self.pool.get('account.move').browse(cursor, user, ids, context=context)
for move in data_move:
if move.state=='draft':
name = '*' + str(move.id)
else:
name = move.name
res.append((move.id, name))
return res
def _get_period(self, cr, uid, context=None):
ctx = dict(context or {})
period_ids = self.pool.get('account.period').find(cr, uid, context=ctx)
return period_ids[0]
def _amount_compute(self, cr, uid, ids, name, args, context, where =''):
if not ids: return {}
cr.execute( 'SELECT move_id, SUM(debit) '\
'FROM account_move_line '\
'WHERE move_id IN %s '\
'GROUP BY move_id', (tuple(ids),))
result = dict(cr.fetchall())
for id in ids:
result.setdefault(id, 0.0)
return result
def _search_amount(self, cr, uid, obj, name, args, context):
ids = set()
for cond in args:
amount = cond[2]
if isinstance(cond[2],(list,tuple)):
if cond[1] in ['in','not in']:
amount = tuple(cond[2])
else:
continue
else:
if cond[1] in ['=like', 'like', 'not like', 'ilike', 'not ilike', 'in', 'not in', 'child_of']:
continue
cr.execute("select move_id from account_move_line group by move_id having sum(debit) %s %%s" % (cond[1]),(amount,))
res_ids = set(id[0] for id in cr.fetchall())
ids = ids and (ids & res_ids) or res_ids
if ids:
return [('id', 'in', tuple(ids))]
return [('id', '=', '0')]
def _get_move_from_lines(self, cr, uid, ids, context=None):
line_obj = self.pool.get('account.move.line')
return [line.move_id.id for line in line_obj.browse(cr, uid, ids, context=context)]
_columns = {
'name': fields.char('Number', required=True, copy=False),
'ref': fields.char('Reference', copy=False),
'period_id': fields.many2one('account.period', 'Period', required=True, states={'posted':[('readonly',True)]}),
'journal_id': fields.many2one('account.journal', 'Journal', required=True, states={'posted':[('readonly',True)]}),
'state': fields.selection(
[('draft','Unposted'), ('posted','Posted')], 'Status',
required=True, readonly=True, copy=False,
help='All manually created new journal entries are usually in the status \'Unposted\', '
'but you can set the option to skip that status on the related journal. '
'In that case, they will behave as journal entries automatically created by the '
'system on document validation (invoices, bank statements...) and will be created '
'in \'Posted\' status.'),
'line_id': fields.one2many('account.move.line', 'move_id', 'Entries',
states={'posted':[('readonly',True)]},
copy=True),
'to_check': fields.boolean('To Review', help='Check this box if you are unsure of that journal entry and if you want to note it as \'to be reviewed\' by an accounting expert.'),
'partner_id': fields.related('line_id', 'partner_id', type="many2one", relation="res.partner", string="Partner", store={
_name: (lambda self, cr,uid,ids,c: ids, ['line_id'], 10),
'account.move.line': (_get_move_from_lines, ['partner_id'],10)
}),
'amount': fields.function(_amount_compute, string='Amount', digits_compute=dp.get_precision('Account'), type='float', fnct_search=_search_amount),
'date': fields.date('Date', required=True, states={'posted':[('readonly',True)]}, select=True),
'narration':fields.text('Internal Note'),
'company_id': fields.related('journal_id','company_id',type='many2one',relation='res.company',string='Company', store=True, readonly=True),
        'balance': fields.float('balance', digits_compute=dp.get_precision('Account'), help="This field is only used for internal purposes and should not be displayed"),
}
_defaults = {
'name': '/',
'state': 'draft',
'period_id': _get_period,
'date': fields.date.context_today,
'company_id': lambda self, cr, uid, c: self.pool.get('res.users').browse(cr, uid, uid, c).company_id.id,
}
def _check_centralisation(self, cursor, user, ids, context=None):
for move in self.browse(cursor, user, ids, context=context):
if move.journal_id.centralisation:
move_ids = self.search(cursor, user, [
('period_id', '=', move.period_id.id),
('journal_id', '=', move.journal_id.id),
])
if len(move_ids) > 1:
return False
return True
_constraints = [
(_check_centralisation,
'You cannot create more than one move per period on a centralized journal.',
['journal_id']),
]
def post(self, cr, uid, ids, context=None):
if context is None:
context = {}
invoice = context.get('invoice', False)
valid_moves = self.validate(cr, uid, ids, context)
if not valid_moves:
raise osv.except_osv(_('Error!'), _('You cannot validate a non-balanced entry.\nMake sure you have configured payment terms properly.\nThe latest payment term line should be of the "Balance" type.'))
obj_sequence = self.pool.get('ir.sequence')
for move in self.browse(cr, uid, valid_moves, context=context):
if move.name =='/':
new_name = False
journal = move.journal_id
if invoice and invoice.internal_number:
new_name = invoice.internal_number
else:
if journal.sequence_id:
c = {'fiscalyear_id': move.period_id.fiscalyear_id.id}
new_name = obj_sequence.next_by_id(cr, uid, journal.sequence_id.id, c)
else:
raise osv.except_osv(_('Error!'), _('Please define a sequence on the journal.'))
if new_name:
self.write(cr, uid, [move.id], {'name':new_name})
cr.execute('UPDATE account_move '\
'SET state=%s '\
'WHERE id IN %s',
('posted', tuple(valid_moves),))
self.invalidate_cache(cr, uid, context=context)
return True
def button_validate(self, cursor, user, ids, context=None):
for move in self.browse(cursor, user, ids, context=context):
# check that all accounts have the same topmost ancestor
top_common = None
for line in move.line_id:
account = line.account_id
top_account = account
while top_account.parent_id:
top_account = top_account.parent_id
if not top_common:
top_common = top_account
elif top_account.id != top_common.id:
raise osv.except_osv(_('Error!'),
_('You cannot validate this journal entry because account "%s" does not belong to chart of accounts "%s".') % (account.name, top_common.name))
return self.post(cursor, user, ids, context=context)
def button_cancel(self, cr, uid, ids, context=None):
for line in self.browse(cr, uid, ids, context=context):
if not line.journal_id.update_posted:
raise osv.except_osv(_('Error!'), _('You cannot modify a posted entry of this journal.\nFirst you should set the journal to allow cancelling entries.'))
if ids:
cr.execute('UPDATE account_move '\
'SET state=%s '\
'WHERE id IN %s', ('draft', tuple(ids),))
self.invalidate_cache(cr, uid, context=context)
return True
def write(self, cr, uid, ids, vals, context=None):
if context is None:
context = {}
c = context.copy()
c['novalidate'] = True
result = super(account_move, self).write(cr, uid, ids, vals, c)
self.validate(cr, uid, ids, context=context)
return result
#
# TODO: Check if period is closed !
#
def create(self, cr, uid, vals, context=None):
context = dict(context or {})
if vals.get('line_id'):
if vals.get('journal_id'):
for l in vals['line_id']:
if not l[0]:
l[2]['journal_id'] = vals['journal_id']
context['journal_id'] = vals['journal_id']
if 'period_id' in vals:
for l in vals['line_id']:
if not l[0]:
l[2]['period_id'] = vals['period_id']
context['period_id'] = vals['period_id']
else:
default_period = self._get_period(cr, uid, context)
for l in vals['line_id']:
if not l[0]:
l[2]['period_id'] = default_period
context['period_id'] = default_period
c = context.copy()
c['novalidate'] = True
c['period_id'] = vals['period_id'] if 'period_id' in vals else self._get_period(cr, uid, context)
c['journal_id'] = vals['journal_id']
if 'date' in vals: c['date'] = vals['date']
result = super(account_move, self).create(cr, uid, vals, c)
tmp = self.validate(cr, uid, [result], context)
journal = self.pool.get('account.journal').browse(cr, uid, vals['journal_id'], context)
if journal.entry_posted and tmp:
self.button_validate(cr,uid, [result], context)
else:
result = super(account_move, self).create(cr, uid, vals, context)
return result
def unlink(self, cr, uid, ids, context=None, check=True):
context = dict(context or {})
if isinstance(ids, (int, long)):
ids = [ids]
toremove = []
obj_move_line = self.pool.get('account.move.line')
for move in self.browse(cr, uid, ids, context=context):
if move['state'] != 'draft':
raise osv.except_osv(_('User Error!'),
_('You cannot delete a posted journal entry "%s".') % \
move['name'])
for line in move.line_id:
if line.invoice:
raise osv.except_osv(_('User Error!'),
_("Move cannot be deleted if linked to an invoice. (Invoice: %s - Move ID:%s)") % \
(line.invoice.number,move.name))
line_ids = map(lambda x: x.id, move.line_id)
context['journal_id'] = move.journal_id.id
context['period_id'] = move.period_id.id
obj_move_line._update_check(cr, uid, line_ids, context)
obj_move_line.unlink(cr, uid, line_ids, context=context)
toremove.append(move.id)
result = super(account_move, self).unlink(cr, uid, toremove, context)
return result
def _compute_balance(self, cr, uid, id, context=None):
move = self.browse(cr, uid, id, context=context)
amount = 0
for line in move.line_id:
amount+= (line.debit - line.credit)
return amount
def _centralise(self, cr, uid, move, mode, context=None):
assert mode in ('debit', 'credit'), 'Invalid Mode' #to prevent sql injection
currency_obj = self.pool.get('res.currency')
account_move_line_obj = self.pool.get('account.move.line')
context = dict(context or {})
if mode=='credit':
account_id = move.journal_id.default_debit_account_id.id
mode2 = 'debit'
if not account_id:
raise osv.except_osv(_('User Error!'),
_('There is no default debit account defined \n' \
'on journal "%s".') % move.journal_id.name)
else:
account_id = move.journal_id.default_credit_account_id.id
mode2 = 'credit'
if not account_id:
raise osv.except_osv(_('User Error!'),
_('There is no default credit account defined \n' \
'on journal "%s".') % move.journal_id.name)
# find the first line of this move with the current mode
# or create it if it doesn't exist
cr.execute('select id from account_move_line where move_id=%s and centralisation=%s limit 1', (move.id, mode))
res = cr.fetchone()
if res:
line_id = res[0]
else:
context.update({'journal_id': move.journal_id.id, 'period_id': move.period_id.id})
line_id = account_move_line_obj.create(cr, uid, {
'name': _(mode.capitalize()+' Centralisation'),
'centralisation': mode,
'partner_id': False,
'account_id': account_id,
'move_id': move.id,
'journal_id': move.journal_id.id,
'period_id': move.period_id.id,
'date': move.period_id.date_stop,
'debit': 0.0,
'credit': 0.0,
}, context)
# find the first line of this move with the other mode
# so that we can exclude it from our calculation
cr.execute('select id from account_move_line where move_id=%s and centralisation=%s limit 1', (move.id, mode2))
res = cr.fetchone()
if res:
line_id2 = res[0]
else:
line_id2 = 0
cr.execute('SELECT SUM(%s) FROM account_move_line WHERE move_id=%%s AND id!=%%s' % (mode,), (move.id, line_id2))
result = cr.fetchone()[0] or 0.0
cr.execute('update account_move_line set '+mode2+'=%s where id=%s', (result, line_id))
account_move_line_obj.invalidate_cache(cr, uid, [mode2], [line_id], context=context)
#adjust also the amount in currency if needed
cr.execute("select currency_id, sum(amount_currency) as amount_currency from account_move_line where move_id = %s and currency_id is not null group by currency_id", (move.id,))
for row in cr.dictfetchall():
currency_id = currency_obj.browse(cr, uid, row['currency_id'], context=context)
if not currency_obj.is_zero(cr, uid, currency_id, row['amount_currency']):
amount_currency = row['amount_currency'] * -1
account_id = amount_currency > 0 and move.journal_id.default_debit_account_id.id or move.journal_id.default_credit_account_id.id
                cr.execute('select id from account_move_line where move_id=%s and centralisation=\'currency\' and currency_id = %s limit 1', (move.id, row['currency_id']))
res = cr.fetchone()
if res:
cr.execute('update account_move_line set amount_currency=%s , account_id=%s where id=%s', (amount_currency, account_id, res[0]))
account_move_line_obj.invalidate_cache(cr, uid, ['amount_currency', 'account_id'], [res[0]], context=context)
else:
context.update({'journal_id': move.journal_id.id, 'period_id': move.period_id.id})
line_id = account_move_line_obj.create(cr, uid, {
'name': _('Currency Adjustment'),
'centralisation': 'currency',
'partner_id': False,
'account_id': account_id,
'move_id': move.id,
'journal_id': move.journal_id.id,
'period_id': move.period_id.id,
'date': move.period_id.date_stop,
'debit': 0.0,
'credit': 0.0,
'currency_id': row['currency_id'],
'amount_currency': amount_currency,
}, context)
return True
#
# Validate a balanced move. If it is a centralised journal, create a move.
#
def validate(self, cr, uid, ids, context=None):
if context and ('__last_update' in context):
del context['__last_update']
valid_moves = [] #Maintains a list of moves which can be responsible to create analytic entries
obj_analytic_line = self.pool.get('account.analytic.line')
obj_move_line = self.pool.get('account.move.line')
for move in self.browse(cr, uid, ids, context):
journal = move.journal_id
amount = 0
line_ids = []
line_draft_ids = []
company_id = None
for line in move.line_id:
amount += line.debit - line.credit
line_ids.append(line.id)
if line.state=='draft':
line_draft_ids.append(line.id)
if not company_id:
company_id = line.account_id.company_id.id
if not company_id == line.account_id.company_id.id:
raise osv.except_osv(_('Error!'), _("Cannot create moves for different companies."))
if line.account_id.currency_id and line.currency_id:
if line.account_id.currency_id.id != line.currency_id.id and (line.account_id.currency_id.id != line.account_id.company_id.currency_id.id):
raise osv.except_osv(_('Error!'), _("""Cannot create move with currency different from ..""") % (line.account_id.code, line.account_id.name))
if abs(amount) < 10 ** -4:
# If the move is balanced
# Add to the list of valid moves
# (analytic lines will be created later for valid moves)
valid_moves.append(move)
# Check whether the move lines are confirmed
if not line_draft_ids:
continue
# Update the move lines (set them as valid)
obj_move_line.write(cr, uid, line_draft_ids, {
'state': 'valid'
}, context, check=False)
account = {}
account2 = {}
if journal.type in ('purchase','sale'):
for line in move.line_id:
code = amount = 0
key = (line.account_id.id, line.tax_code_id.id)
if key in account2:
code = account2[key][0]
amount = account2[key][1] * (line.debit + line.credit)
elif line.account_id.id in account:
code = account[line.account_id.id][0]
amount = account[line.account_id.id][1] * (line.debit + line.credit)
if (code or amount) and not (line.tax_code_id or line.tax_amount):
obj_move_line.write(cr, uid, [line.id], {
'tax_code_id': code,
'tax_amount': amount
}, context, check=False)
elif journal.centralisation:
# If the move is not balanced, it must be centralised...
# Add to the list of valid moves
# (analytic lines will be created later for valid moves)
valid_moves.append(move)
#
# Update the move lines (set them as valid)
#
self._centralise(cr, uid, move, 'debit', context=context)
self._centralise(cr, uid, move, 'credit', context=context)
obj_move_line.write(cr, uid, line_draft_ids, {
'state': 'valid'
}, context, check=False)
else:
# We can't validate it (it's unbalanced)
# Setting the lines as draft
not_draft_line_ids = list(set(line_ids) - set(line_draft_ids))
if not_draft_line_ids:
obj_move_line.write(cr, uid, not_draft_line_ids, {
'state': 'draft'
}, context, check=False)
# Create analytic lines for the valid moves
for record in valid_moves:
obj_move_line.create_analytic_lines(cr, uid, [line.id for line in record.line_id], context)
valid_moves = [move.id for move in valid_moves]
return len(valid_moves) > 0 and valid_moves or False
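    # Minimal sketch of how a balanced entry flows through validate()/post(),
    # assuming ``cr``/``uid`` are available and the journal, period and account
    # ids below are placeholders for existing records:
    #
    #     move_obj = self.pool.get('account.move')
    #     move_id = move_obj.create(cr, uid, {
    #         'journal_id': journal_id,
    #         'period_id': period_id,
    #         'line_id': [
    #             (0, 0, {'name': 'Debit line',  'account_id': debit_account_id,  'debit': 100.0}),
    #             (0, 0, {'name': 'Credit line', 'account_id': credit_account_id, 'credit': 100.0}),
    #         ],
    #     })
    #     # create() already runs validate(); post() then numbers the entry and
    #     # sets its state to 'posted'.
    #     move_obj.post(cr, uid, [move_id])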
class account_move_reconcile(osv.osv):
_name = "account.move.reconcile"
_description = "Account Reconciliation"
_columns = {
'name': fields.char('Name', required=True),
'type': fields.char('Type', required=True),
'line_id': fields.one2many('account.move.line', 'reconcile_id', 'Entry Lines'),
'line_partial_ids': fields.one2many('account.move.line', 'reconcile_partial_id', 'Partial Entry lines'),
'create_date': fields.date('Creation date', readonly=True),
        'opening_reconciliation': fields.boolean('Opening Entries Reconciliation', help="Is this reconciliation produced by the opening of a new fiscal year?"),
}
_defaults = {
'name': lambda self,cr,uid,ctx=None: self.pool.get('ir.sequence').get(cr, uid, 'account.reconcile', context=ctx) or '/',
}
    # You cannot unlink a reconciliation if it is an opening_reconciliation one;
    # use the generate opening entries wizard for that instead.
def unlink(self, cr, uid, ids, context=None):
for move_rec in self.browse(cr, uid, ids, context=context):
if move_rec.opening_reconciliation:
                raise osv.except_osv(_('Error!'), _('You cannot unreconcile journal items if they have been generated by the \
                        opening/closing fiscal year process.'))
return super(account_move_reconcile, self).unlink(cr, uid, ids, context=context)
# Look in the line_id and line_partial_ids to ensure the partner is the same or empty
# on all lines. We allow that only for opening/closing period
def _check_same_partner(self, cr, uid, ids, context=None):
for reconcile in self.browse(cr, uid, ids, context=context):
move_lines = []
if not reconcile.opening_reconciliation:
if reconcile.line_id:
first_partner = reconcile.line_id[0].partner_id.id
move_lines = reconcile.line_id
elif reconcile.line_partial_ids:
first_partner = reconcile.line_partial_ids[0].partner_id.id
move_lines = reconcile.line_partial_ids
if any([(line.account_id.type in ('receivable', 'payable') and line.partner_id.id != first_partner) for line in move_lines]):
return False
return True
_constraints = [
(_check_same_partner, 'You can only reconcile journal items with the same partner.', ['line_id', 'line_partial_ids']),
]
def reconcile_partial_check(self, cr, uid, ids, type='auto', context=None):
total = 0.0
for rec in self.browse(cr, uid, ids, context=context):
for line in rec.line_partial_ids:
if line.account_id.currency_id:
total += line.amount_currency
else:
total += (line.debit or 0.0) - (line.credit or 0.0)
if not total:
self.pool.get('account.move.line').write(cr, uid,
map(lambda x: x.id, rec.line_partial_ids),
{'reconcile_id': rec.id },
context=context
)
return True
def name_get(self, cr, uid, ids, context=None):
if not ids:
return []
result = []
for r in self.browse(cr, uid, ids, context=context):
total = reduce(lambda y,t: (t.debit or 0.0) - (t.credit or 0.0) + y, r.line_partial_ids, 0.0)
if total:
name = '%s (%.2f)' % (r.name, total)
result.append((r.id,name))
else:
result.append((r.id,r.name))
return result
#----------------------------------------------------------
# Tax
#----------------------------------------------------------
"""
a documenter
child_depend: la taxe depend des taxes filles
"""
class account_tax_code(osv.osv):
"""
A code for the tax object.
This code is used for some tax declarations.
"""
def _sum(self, cr, uid, ids, name, args, context, where ='', where_params=()):
parent_ids = tuple(self.search(cr, uid, [('parent_id', 'child_of', ids)]))
if context.get('based_on', 'invoices') == 'payments':
cr.execute('SELECT line.tax_code_id, sum(line.tax_amount) \
FROM account_move_line AS line, \
account_move AS move \
LEFT JOIN account_invoice invoice ON \
(invoice.move_id = move.id) \
WHERE line.tax_code_id IN %s '+where+' \
AND move.id = line.move_id \
AND ((invoice.state = \'paid\') \
OR (invoice.id IS NULL)) \
GROUP BY line.tax_code_id',
(parent_ids,) + where_params)
else:
cr.execute('SELECT line.tax_code_id, sum(line.tax_amount) \
FROM account_move_line AS line, \
account_move AS move \
WHERE line.tax_code_id IN %s '+where+' \
AND move.id = line.move_id \
GROUP BY line.tax_code_id',
(parent_ids,) + where_params)
res=dict(cr.fetchall())
obj_precision = self.pool.get('decimal.precision')
res2 = {}
for record in self.browse(cr, uid, ids, context=context):
def _rec_get(record):
amount = res.get(record.id, 0.0)
for rec in record.child_ids:
amount += _rec_get(rec) * rec.sign
return amount
res2[record.id] = round(_rec_get(record), obj_precision.precision_get(cr, uid, 'Account'))
return res2
def _sum_year(self, cr, uid, ids, name, args, context=None):
if context is None:
context = {}
move_state = ('posted', )
if context.get('state', 'all') == 'all':
move_state = ('draft', 'posted', )
if context.get('fiscalyear_id', False):
fiscalyear_id = [context['fiscalyear_id']]
else:
fiscalyear_id = self.pool.get('account.fiscalyear').finds(cr, uid, exception=False)
where = ''
where_params = ()
if fiscalyear_id:
pids = []
for fy in fiscalyear_id:
pids += map(lambda x: str(x.id), self.pool.get('account.fiscalyear').browse(cr, uid, fy).period_ids)
if pids:
where = ' AND line.period_id IN %s AND move.state IN %s '
where_params = (tuple(pids), move_state)
return self._sum(cr, uid, ids, name, args, context,
where=where, where_params=where_params)
def _sum_period(self, cr, uid, ids, name, args, context):
if context is None:
context = {}
move_state = ('posted', )
if context.get('state', False) == 'all':
move_state = ('draft', 'posted', )
if context.get('period_id', False):
period_id = context['period_id']
else:
period_id = self.pool.get('account.period').find(cr, uid, context=context)
if not period_id:
return dict.fromkeys(ids, 0.0)
period_id = period_id[0]
return self._sum(cr, uid, ids, name, args, context,
where=' AND line.period_id=%s AND move.state IN %s', where_params=(period_id, move_state))
_name = 'account.tax.code'
_description = 'Tax Code'
_rec_name = 'code'
_order = 'sequence, code'
_columns = {
'name': fields.char('Tax Case Name', required=True, translate=True),
'code': fields.char('Case Code', size=64),
'info': fields.text('Description'),
'sum': fields.function(_sum_year, string="Year Sum"),
'sum_period': fields.function(_sum_period, string="Period Sum"),
'parent_id': fields.many2one('account.tax.code', 'Parent Code', select=True),
'child_ids': fields.one2many('account.tax.code', 'parent_id', 'Child Codes'),
'line_ids': fields.one2many('account.move.line', 'tax_code_id', 'Lines'),
'company_id': fields.many2one('res.company', 'Company', required=True),
        'sign': fields.float('Coefficient for parent', required=True, help='You can specify here the coefficient that will be used when consolidating the amount of this case into its parent. For example, set 1/-1 if you want to add/subtract it.'),
'notprintable':fields.boolean("Not Printable in Invoice", help="Check this box if you don't want any tax related to this tax code to appear on invoices"),
'sequence': fields.integer('Sequence', help="Determine the display order in the report 'Accounting \ Reporting \ Generic Reporting \ Taxes \ Taxes Report'"),
}
def name_search(self, cr, user, name, args=None, operator='ilike', context=None, limit=80):
if not args:
args = []
if operator in expression.NEGATIVE_TERM_OPERATORS:
domain = [('code', operator, name), ('name', operator, name)]
else:
domain = ['|', ('code', operator, name), ('name', operator, name)]
ids = self.search(cr, user, expression.AND([domain, args]), limit=limit, context=context)
return self.name_get(cr, user, ids, context=context)
def name_get(self, cr, uid, ids, context=None):
if isinstance(ids, (int, long)):
ids = [ids]
if not ids:
return []
if isinstance(ids, (int, long)):
ids = [ids]
reads = self.read(cr, uid, ids, ['name','code'], context=context, load='_classic_write')
return [(x['id'], (x['code'] and (x['code'] + ' - ') or '') + x['name']) \
for x in reads]
def _default_company(self, cr, uid, context=None):
user = self.pool.get('res.users').browse(cr, uid, uid, context=context)
if user.company_id:
return user.company_id.id
return self.pool.get('res.company').search(cr, uid, [('parent_id', '=', False)])[0]
_defaults = {
'company_id': _default_company,
'sign': 1.0,
'notprintable': False,
}
_check_recursion = check_cycle
_constraints = [
(_check_recursion, 'Error!\nYou cannot create recursive accounts.', ['parent_id'])
]
_order = 'code'
def get_precision_tax():
def change_digit_tax(cr):
res = openerp.registry(cr.dbname)['decimal.precision'].precision_get(cr, SUPERUSER_ID, 'Account')
return (16, res+3)
return change_digit_tax
class account_tax(osv.osv):
"""
A tax object.
Type: percent, fixed, none, code
PERCENT: tax = price * amount
    FIXED: tax = amount (a fixed amount per unit; the tax-included price becomes price + amount)
NONE: no tax line
CODE: execute python code. localcontext = {'price_unit':pu}
return result in the context
Ex: result=round(price_unit*0.21,4)
"""
def copy_data(self, cr, uid, id, default=None, context=None):
if default is None:
default = {}
this = self.browse(cr, uid, id, context=context)
tmp_default = dict(default, name=_("%s (Copy)") % this.name)
return super(account_tax, self).copy_data(cr, uid, id, default=tmp_default, context=context)
_name = 'account.tax'
_description = 'Tax'
_columns = {
'name': fields.char('Tax Name', required=True, translate=True, help="This name will be displayed on reports"),
'sequence': fields.integer('Sequence', required=True, help="The sequence field is used to order the tax lines from the lowest sequences to the higher ones. The order is important if you have a tax with several tax children. In this case, the evaluation order is important."),
'amount': fields.float('Amount', required=True, digits_compute=get_precision_tax(), help="For taxes of type percentage, enter % ratio between 0-1."),
'active': fields.boolean('Active', help="If the active field is set to False, it will allow you to hide the tax without removing it."),
'type': fields.selection( [('percent','Percentage'), ('fixed','Fixed Amount'), ('none','None'), ('code','Python Code'), ('balance','Balance')], 'Tax Type', required=True,
help="The computation method for the tax amount."),
'applicable_type': fields.selection( [('true','Always'), ('code','Given by Python Code')], 'Applicability', required=True,
help="If not applicable (computed through a Python code), the tax won't appear on the invoice."),
'domain':fields.char('Domain', help="This field is only used if you develop your own module allowing developers to create specific taxes in a custom domain."),
'account_collected_id':fields.many2one('account.account', 'Invoice Tax Account', help="Set the account that will be set by default on invoice tax lines for invoices. Leave empty to use the expense account."),
'account_paid_id':fields.many2one('account.account', 'Refund Tax Account', help="Set the account that will be set by default on invoice tax lines for refunds. Leave empty to use the expense account."),
'account_analytic_collected_id':fields.many2one('account.analytic.account', 'Invoice Tax Analytic Account', help="Set the analytic account that will be used by default on the invoice tax lines for invoices. Leave empty if you don't want to use an analytic account on the invoice tax lines by default."),
'account_analytic_paid_id':fields.many2one('account.analytic.account', 'Refund Tax Analytic Account', help="Set the analytic account that will be used by default on the invoice tax lines for refunds. Leave empty if you don't want to use an analytic account on the invoice tax lines by default."),
'parent_id':fields.many2one('account.tax', 'Parent Tax Account', select=True),
'child_ids':fields.one2many('account.tax', 'parent_id', 'Child Tax Accounts'),
'child_depend':fields.boolean('Tax on Children', help="Set if the tax computation is based on the computation of child taxes rather than on the total amount."),
'python_compute':fields.text('Python Code'),
'python_compute_inv':fields.text('Python Code (reverse)'),
'python_applicable':fields.text('Applicable Code'),
#
# Fields used for the Tax declaration
#
'base_code_id': fields.many2one('account.tax.code', 'Account Base Code', help="Use this code for the tax declaration."),
'tax_code_id': fields.many2one('account.tax.code', 'Account Tax Code', help="Use this code for the tax declaration."),
'base_sign': fields.float('Base Code Sign', help="Usually 1 or -1.", digits_compute=get_precision_tax()),
'tax_sign': fields.float('Tax Code Sign', help="Usually 1 or -1.", digits_compute=get_precision_tax()),
# Same fields for refund invoices
'ref_base_code_id': fields.many2one('account.tax.code', 'Refund Base Code', help="Use this code for the tax declaration."),
'ref_tax_code_id': fields.many2one('account.tax.code', 'Refund Tax Code', help="Use this code for the tax declaration."),
'ref_base_sign': fields.float('Refund Base Code Sign', help="Usually 1 or -1.", digits_compute=get_precision_tax()),
'ref_tax_sign': fields.float('Refund Tax Code Sign', help="Usually 1 or -1.", digits_compute=get_precision_tax()),
'include_base_amount': fields.boolean('Included in base amount', help="Indicates if the amount of tax must be included in the base amount for the computation of the next taxes"),
'company_id': fields.many2one('res.company', 'Company', required=True),
'description': fields.char('Tax Code'),
'price_include': fields.boolean('Tax Included in Price', help="Check this if the price you use on the product and invoices includes this tax."),
'type_tax_use': fields.selection([('sale','Sale'),('purchase','Purchase'),('all','All')], 'Tax Application', required=True)
}
_sql_constraints = [
('name_company_uniq', 'unique(name, company_id)', 'Tax Name must be unique per company!'),
]
def name_search(self, cr, user, name, args=None, operator='ilike', context=None, limit=80):
"""
        Returns a list of tuples containing id and name, as internally it calls {def name_get}.
        result format: [(id, name), (id, name), ...]
@param cr: A database cursor
@param user: ID of the user currently logged in
@param name: name to search
@param args: other arguments
@param operator: default operator is 'ilike', it can be changed
@param context: context arguments, like lang, time zone
@param limit: Returns first 'n' ids of complete result, default is 80.
        @return: Returns a list of tuples containing id and name
"""
if not args:
args = []
if operator in expression.NEGATIVE_TERM_OPERATORS:
domain = [('description', operator, name), ('name', operator, name)]
else:
domain = ['|', ('description', operator, name), ('name', operator, name)]
ids = self.search(cr, user, expression.AND([domain, args]), limit=limit, context=context)
return self.name_get(cr, user, ids, context=context)
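    # Minimal sketch of a name_search call, assuming ``cr``/``uid`` are
    # available; matching works on both the tax code ('description') and the
    # tax name:
    #
    #     tax_obj = self.pool.get('account.tax')
    #     tax_obj.name_search(cr, uid, 'VAT', operator='ilike', limit=5)
    #     # -> [(id, display_name), ...] for taxes whose code or name contains 'VAT'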
def write(self, cr, uid, ids, vals, context=None):
if vals.get('type', False) and vals['type'] in ('none', 'code'):
vals.update({'amount': 0.0})
return super(account_tax, self).write(cr, uid, ids, vals, context=context)
def search(self, cr, uid, args, offset=0, limit=None, order=None, context=None, count=False):
if context is None:
context = {}
journal_pool = self.pool.get('account.journal')
if context.get('type'):
if context.get('type') in ('out_invoice','out_refund'):
args += [('type_tax_use','in',['sale','all'])]
elif context.get('type') in ('in_invoice','in_refund'):
args += [('type_tax_use','in',['purchase','all'])]
if context.get('journal_id'):
journal = journal_pool.browse(cr, uid, context.get('journal_id'))
if journal.type in ('sale', 'purchase'):
args += [('type_tax_use','in',[journal.type,'all'])]
return super(account_tax, self).search(cr, uid, args, offset, limit, order, context, count)
def name_get(self, cr, uid, ids, context=None):
if not ids:
return []
res = []
for record in self.read(cr, uid, ids, ['description','name'], context=context):
name = record['description'] and record['description'] or record['name']
res.append((record['id'],name ))
return res
def _default_company(self, cr, uid, context=None):
user = self.pool.get('res.users').browse(cr, uid, uid, context=context)
if user.company_id:
return user.company_id.id
return self.pool.get('res.company').search(cr, uid, [('parent_id', '=', False)])[0]
_defaults = {
'python_compute': '''# price_unit\n# or False\n# product: product.product object or None\n# partner: res.partner object or None\n\nresult = price_unit * 0.10''',
'python_compute_inv': '''# price_unit\n# product: product.product object or False\n\nresult = price_unit * 0.10''',
'applicable_type': 'true',
'type': 'percent',
'amount': 0,
'price_include': 0,
'active': 1,
'type_tax_use': 'all',
'sequence': 1,
'ref_tax_sign': 1,
'ref_base_sign': 1,
'tax_sign': 1,
'base_sign': 1,
'include_base_amount': False,
'company_id': _default_company,
}
_order = 'sequence'
def _applicable(self, cr, uid, taxes, price_unit, product=None, partner=None):
res = []
for tax in taxes:
if tax.applicable_type=='code':
localdict = {'price_unit':price_unit, 'product':product, 'partner':partner}
exec tax.python_applicable in localdict
if localdict.get('result', False):
res.append(tax)
else:
res.append(tax)
return res
def _unit_compute(self, cr, uid, taxes, price_unit, product=None, partner=None, quantity=0):
taxes = self._applicable(cr, uid, taxes, price_unit ,product, partner)
res = []
cur_price_unit=price_unit
for tax in taxes:
# we compute the amount for the current tax object and append it to the result
data = {'id':tax.id,
'name':tax.description and tax.description + " - " + tax.name or tax.name,
'account_collected_id':tax.account_collected_id.id,
'account_paid_id':tax.account_paid_id.id,
'account_analytic_collected_id': tax.account_analytic_collected_id.id,
'account_analytic_paid_id': tax.account_analytic_paid_id.id,
'base_code_id': tax.base_code_id.id,
'ref_base_code_id': tax.ref_base_code_id.id,
'sequence': tax.sequence,
'base_sign': tax.base_sign,
'tax_sign': tax.tax_sign,
'ref_base_sign': tax.ref_base_sign,
'ref_tax_sign': tax.ref_tax_sign,
'price_unit': cur_price_unit,
'tax_code_id': tax.tax_code_id.id,
'ref_tax_code_id': tax.ref_tax_code_id.id,
}
res.append(data)
if tax.type=='percent':
amount = cur_price_unit * tax.amount
data['amount'] = amount
elif tax.type=='fixed':
data['amount'] = tax.amount
data['tax_amount']=quantity
# data['amount'] = quantity
elif tax.type=='code':
localdict = {'price_unit':cur_price_unit, 'product':product, 'partner':partner, 'quantity': quantity}
exec tax.python_compute in localdict
amount = localdict['result']
data['amount'] = amount
elif tax.type=='balance':
data['amount'] = cur_price_unit - reduce(lambda x,y: y.get('amount',0.0)+x, res, 0.0)
data['balance'] = cur_price_unit
amount2 = data.get('amount', 0.0)
if tax.child_ids:
if tax.child_depend:
latest = res.pop()
amount = amount2
child_tax = self._unit_compute(cr, uid, tax.child_ids, amount, product, partner, quantity)
res.extend(child_tax)
for child in child_tax:
amount2 += child.get('amount', 0.0)
if tax.child_depend:
for r in res:
for name in ('base','ref_base'):
if latest[name+'_code_id'] and latest[name+'_sign'] and not r[name+'_code_id']:
r[name+'_code_id'] = latest[name+'_code_id']
r[name+'_sign'] = latest[name+'_sign']
r['price_unit'] = latest['price_unit']
latest[name+'_code_id'] = False
for name in ('tax','ref_tax'):
if latest[name+'_code_id'] and latest[name+'_sign'] and not r[name+'_code_id']:
r[name+'_code_id'] = latest[name+'_code_id']
r[name+'_sign'] = latest[name+'_sign']
r['amount'] = data['amount']
latest[name+'_code_id'] = False
if tax.include_base_amount:
cur_price_unit+=amount2
return res
def compute_for_bank_reconciliation(self, cr, uid, tax_id, amount, context=None):
""" Called by RPC by the bank statement reconciliation widget """
tax = self.browse(cr, uid, tax_id, context=context)
        return self.compute_all(cr, uid, [tax], amount, 1) # TOCHECK: may need the force_excluded parameter
@api.v7
def compute_all(self, cr, uid, taxes, price_unit, quantity, product=None, partner=None, force_excluded=False):
"""
        :param force_excluded: boolean used to indicate that the value of the tax's price_include field should be
            ignored. It is used when encoding by line, where it does not matter whether the tax was encoded with that
            boolean set to True or False.
RETURN: {
'total': 0.0, # Total without taxes
                'total_included': 0.0,       # Total with taxes
'taxes': [] # List of taxes, see compute for the format
}
"""
# By default, for each tax, tax amount will first be computed
# and rounded at the 'Account' decimal precision for each
# PO/SO/invoice line and then these rounded amounts will be
# summed, leading to the total amount for that tax. But, if the
# company has tax_calculation_rounding_method = round_globally,
# we still follow the same method, but we use a much larger
# precision when we round the tax amount for each line (we use
# the 'Account' decimal precision + 5), and that way it's like
# rounding after the sum of the tax amounts of each line
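        # Worked example (illustrative figures only): three lines each carrying
        # a computed tax of 0.333 at 'Account' precision 2:
        #   - per-line rounding: round(0.333, 2) * 3 = 0.99
        #   - round_globally: the per-line amounts are rounded at precision + 5
        #     (so they stay at 0.333) and the caller's final rounding of the sum
        #     gives round(0.999, 2) = 1.00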
precision = self.pool.get('decimal.precision').precision_get(cr, uid, 'Account')
tax_compute_precision = precision
if taxes and taxes[0].company_id.tax_calculation_rounding_method == 'round_globally':
tax_compute_precision += 5
totalin = totalex = round(price_unit * quantity, precision)
tin = []
tex = []
for tax in taxes:
if not tax.price_include or force_excluded:
tex.append(tax)
else:
tin.append(tax)
tin = self.compute_inv(cr, uid, tin, price_unit, quantity, product=product, partner=partner, precision=tax_compute_precision)
for r in tin:
totalex -= r.get('amount', 0.0)
        # derive the unit price excluding tax; guard against a zero quantity
        totalex_qty = 0.0
        try:
            totalex_qty = totalex / quantity
        except ZeroDivisionError:
            pass
        tex = self._compute(cr, uid, tex, totalex_qty, quantity, product=product, partner=partner, precision=tax_compute_precision)
for r in tex:
totalin += r.get('amount', 0.0)
return {
'total': totalex,
'total_included': totalin,
'taxes': tin + tex
}
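    # Sketch of the price_include / force_excluded interplay, assuming
    # ``tax_obj = self.pool.get('account.tax')`` and ``included_tax`` is a 10%
    # percent tax flagged 'Tax Included in Price':
    #
    #     res = tax_obj.compute_all(cr, uid, [included_tax], 110.0, 1)
    #     # res['total'] == 100.0, res['total_included'] == 110.0
    #     res = tax_obj.compute_all(cr, uid, [included_tax], 110.0, 1, force_excluded=True)
    #     # the 110.0 is now treated as tax-excluded:
    #     # res['total'] == 110.0, res['total_included'] == 121.0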
@api.v8
def compute_all(self, price_unit, quantity, product=None, partner=None, force_excluded=False):
return self._model.compute_all(
self._cr, self._uid, self, price_unit, quantity,
product=product, partner=partner, force_excluded=force_excluded)
def compute(self, cr, uid, taxes, price_unit, quantity, product=None, partner=None):
_logger.warning("Deprecated, use compute_all(...)['taxes'] instead of compute(...) to manage prices with tax included.")
return self._compute(cr, uid, taxes, price_unit, quantity, product, partner)
def _compute(self, cr, uid, taxes, price_unit, quantity, product=None, partner=None, precision=None):
"""
        Compute tax values for the given PRICE_UNIT, QUANTITY and optional PRODUCT / PARTNER.
RETURN:
[ tax ]
tax = {'name':'', 'amount':0.0, 'account_collected_id':1, 'account_paid_id':2}
one tax for each tax id in IDS and their children
"""
if not precision:
precision = self.pool.get('decimal.precision').precision_get(cr, uid, 'Account')
res = self._unit_compute(cr, uid, taxes, price_unit, product, partner, quantity)
total = 0.0
for r in res:
if r.get('balance',False):
r['amount'] = round(r.get('balance', 0.0) * quantity, precision) - total
else:
r['amount'] = round(r.get('amount', 0.0) * quantity, precision)
total += r['amount']
return res
def _unit_compute_inv(self, cr, uid, taxes, price_unit, product=None, partner=None):
taxes = self._applicable(cr, uid, taxes, price_unit, product, partner)
res = []
taxes.reverse()
cur_price_unit = price_unit
tax_parent_tot = 0.0
for tax in taxes:
if (tax.type=='percent') and not tax.include_base_amount:
tax_parent_tot += tax.amount
for tax in taxes:
if (tax.type=='fixed') and not tax.include_base_amount:
cur_price_unit -= tax.amount
for tax in taxes:
if tax.type=='percent':
if tax.include_base_amount:
amount = cur_price_unit - (cur_price_unit / (1 + tax.amount))
else:
amount = (cur_price_unit / (1 + tax_parent_tot)) * tax.amount
elif tax.type=='fixed':
amount = tax.amount
elif tax.type=='code':
localdict = {'price_unit':cur_price_unit, 'product':product, 'partner':partner}
exec tax.python_compute_inv in localdict
amount = localdict['result']
elif tax.type=='balance':
amount = cur_price_unit - reduce(lambda x,y: y.get('amount',0.0)+x, res, 0.0)
if tax.include_base_amount:
cur_price_unit -= amount
todo = 0
else:
todo = 1
res.append({
'id': tax.id,
'todo': todo,
'name': tax.name,
'amount': amount,
'account_collected_id': tax.account_collected_id.id,
'account_paid_id': tax.account_paid_id.id,
'account_analytic_collected_id': tax.account_analytic_collected_id.id,
'account_analytic_paid_id': tax.account_analytic_paid_id.id,
'base_code_id': tax.base_code_id.id,
'ref_base_code_id': tax.ref_base_code_id.id,
'sequence': tax.sequence,
'base_sign': tax.base_sign,
'tax_sign': tax.tax_sign,
'ref_base_sign': tax.ref_base_sign,
'ref_tax_sign': tax.ref_tax_sign,
'price_unit': cur_price_unit,
'tax_code_id': tax.tax_code_id.id,
'ref_tax_code_id': tax.ref_tax_code_id.id,
})
if tax.child_ids:
if tax.child_depend:
del res[-1]
amount = price_unit
parent_tax = self._unit_compute_inv(cr, uid, tax.child_ids, amount, product, partner)
res.extend(parent_tax)
total = 0.0
for r in res:
if r['todo']:
total += r['amount']
for r in res:
r['price_unit'] -= total
r['todo'] = 0
return res
def compute_inv(self, cr, uid, taxes, price_unit, quantity, product=None, partner=None, precision=None):
"""
        Compute tax values for the given PRICE_UNIT, QUANTITY and optional PRODUCT / PARTNER.
        PRICE_UNIT is a tax-included price.
RETURN:
[ tax ]
tax = {'name':'', 'amount':0.0, 'account_collected_id':1, 'account_paid_id':2}
one tax for each tax id in IDS and their children
"""
if not precision:
precision = self.pool.get('decimal.precision').precision_get(cr, uid, 'Account')
res = self._unit_compute_inv(cr, uid, taxes, price_unit, product, partner=None)
total = 0.0
for r in res:
if r.get('balance',False):
r['amount'] = round(r['balance'] * quantity, precision) - total
else:
r['amount'] = round(r['amount'] * quantity, precision)
total += r['amount']
return res
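    # Worked sketch of the inverse computation (illustrative figures only): for
    # a simple 20% tax-included price with price_unit = 120.0 and quantity = 1,
    # _unit_compute_inv yields amount = (120.0 / 1.20) * 0.20 = 20.0, so the
    # caller can recover an untaxed unit price of 100.0.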
# ---------------------------------------------------------
# Account Entries Models
# ---------------------------------------------------------
class account_model(osv.osv):
_name = "account.model"
_description = "Account Model"
_columns = {
'name': fields.char('Model Name', required=True, help="This is a model for recurring accounting entries"),
'journal_id': fields.many2one('account.journal', 'Journal', required=True),
'company_id': fields.related('journal_id', 'company_id', type='many2one', relation='res.company', string='Company', store=True, readonly=True),
'lines_id': fields.one2many('account.model.line', 'model_id', 'Model Entries', copy=True),
'legend': fields.text('Legend', readonly=True, size=100),
}
_defaults = {
'legend': lambda self, cr, uid, context:_('You can specify year, month and date in the name of the model using the following labels:\n\n%(year)s: To Specify Year \n%(month)s: To Specify Month \n%(date)s: Current Date\n\ne.g. My model on %(date)s'),
}
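    # Sketch of how the legend labels are applied: a model named, say,
    # 'Rent %(month)s/%(year)s' generated on 2014-03-15 yields the reference
    # 'Rent 03/2014', because generate() below interpolates the labels with:
    #
    #     model.name % {'year': '2014', 'month': '03', 'date': '2014-03'}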
def generate(self, cr, uid, ids, data=None, context=None):
if data is None:
data = {}
move_ids = []
entry = {}
account_move_obj = self.pool.get('account.move')
account_move_line_obj = self.pool.get('account.move.line')
pt_obj = self.pool.get('account.payment.term')
period_obj = self.pool.get('account.period')
if context is None:
context = {}
if data.get('date', False):
context = dict(context)
context.update({'date': data['date']})
move_date = context.get('date', time.strftime('%Y-%m-%d'))
move_date = datetime.strptime(move_date,"%Y-%m-%d")
for model in self.browse(cr, uid, ids, context=context):
ctx = context.copy()
ctx.update({'company_id': model.company_id.id})
period_ids = period_obj.find(cr, uid, dt=context.get('date', False), context=ctx)
period_id = period_ids and period_ids[0] or False
ctx.update({'journal_id': model.journal_id.id,'period_id': period_id})
try:
entry['name'] = model.name%{'year': move_date.strftime('%Y'), 'month': move_date.strftime('%m'), 'date': move_date.strftime('%Y-%m')}
except:
raise osv.except_osv(_('Wrong Model!'), _('You have a wrong expression "%(...)s" in your model!'))
move_id = account_move_obj.create(cr, uid, {
'ref': entry['name'],
'period_id': period_id,
'journal_id': model.journal_id.id,
'date': context.get('date', fields.date.context_today(self,cr,uid,context=context))
})
move_ids.append(move_id)
for line in model.lines_id:
analytic_account_id = False
if line.analytic_account_id:
if not model.journal_id.analytic_journal_id:
raise osv.except_osv(_('No Analytic Journal!'),_("You have to define an analytic journal on the '%s' journal!") % (model.journal_id.name,))
analytic_account_id = line.analytic_account_id.id
val = {
'move_id': move_id,
'journal_id': model.journal_id.id,
'period_id': period_id,
'analytic_account_id': analytic_account_id
}
date_maturity = context.get('date',time.strftime('%Y-%m-%d'))
if line.date_maturity == 'partner':
if not line.partner_id:
raise osv.except_osv(_('Error!'), _("Maturity date of entry line generated by model line '%s' of model '%s' is based on partner payment term!" \
"\nPlease define partner on it!")%(line.name, model.name))
payment_term_id = False
if model.journal_id.type in ('purchase', 'purchase_refund') and line.partner_id.property_supplier_payment_term:
payment_term_id = line.partner_id.property_supplier_payment_term.id
elif line.partner_id.property_payment_term:
payment_term_id = line.partner_id.property_payment_term.id
if payment_term_id:
pterm_list = pt_obj.compute(cr, uid, payment_term_id, value=1, date_ref=date_maturity)
if pterm_list:
pterm_list = [l[0] for l in pterm_list]
pterm_list.sort()
date_maturity = pterm_list[-1]
val.update({
'name': line.name,
'quantity': line.quantity,
'debit': line.debit,
'credit': line.credit,
'account_id': line.account_id.id,
'move_id': move_id,
'partner_id': line.partner_id.id,
'date': context.get('date', fields.date.context_today(self,cr,uid,context=context)),
'date_maturity': date_maturity
})
account_move_line_obj.create(cr, uid, val, context=ctx)
return move_ids
def onchange_journal_id(self, cr, uid, ids, journal_id, context=None):
company_id = False
if journal_id:
journal = self.pool.get('account.journal').browse(cr, uid, journal_id, context=context)
if journal.company_id.id:
company_id = journal.company_id.id
return {'value': {'company_id': company_id}}
class account_model_line(osv.osv):
_name = "account.model.line"
_description = "Account Model Entries"
_columns = {
'name': fields.char('Name', required=True),
'sequence': fields.integer('Sequence', required=True, help="The sequence field is used to order the resources from lower sequences to higher ones."),
'quantity': fields.float('Quantity', digits_compute=dp.get_precision('Account'), help="The optional quantity on entries."),
'debit': fields.float('Debit', digits_compute=dp.get_precision('Account')),
'credit': fields.float('Credit', digits_compute=dp.get_precision('Account')),
'account_id': fields.many2one('account.account', 'Account', required=True, ondelete="cascade"),
'analytic_account_id': fields.many2one('account.analytic.account', 'Analytic Account', ondelete="cascade"),
'model_id': fields.many2one('account.model', 'Model', required=True, ondelete="cascade", select=True),
'amount_currency': fields.float('Amount Currency', help="The amount expressed in an optional other currency."),
'currency_id': fields.many2one('res.currency', 'Currency'),
'partner_id': fields.many2one('res.partner', 'Partner'),
'date_maturity': fields.selection([('today','Date of the day'), ('partner','Partner Payment Term')], 'Maturity Date', help="The maturity date of the generated entries for this model. You can choose between the creation date or the creation date of the entries plus the partner payment terms."),
}
_order = 'sequence'
_sql_constraints = [
('credit_debit1', 'CHECK (credit*debit=0)', 'Wrong credit or debit value in model, they must be positive!'),
('credit_debit2', 'CHECK (credit+debit>=0)', 'Wrong credit or debit value in model, they must be positive!'),
]
# ---------------------------------------------------------
# Account Subscription
# ---------------------------------------------------------
class account_subscription(osv.osv):
_name = "account.subscription"
_description = "Account Subscription"
_columns = {
'name': fields.char('Name', required=True),
'ref': fields.char('Reference'),
'model_id': fields.many2one('account.model', 'Model', required=True),
'date_start': fields.date('Start Date', required=True),
'period_total': fields.integer('Number of Periods', required=True),
'period_nbr': fields.integer('Period', required=True),
'period_type': fields.selection([('day','days'),('month','month'),('year','year')], 'Period Type', required=True),
'state': fields.selection([('draft','Draft'),('running','Running'),('done','Done')], 'Status', required=True, readonly=True, copy=False),
'lines_id': fields.one2many('account.subscription.line', 'subscription_id', 'Subscription Lines', copy=True)
}
_defaults = {
'date_start': fields.date.context_today,
'period_type': 'month',
'period_total': 12,
'period_nbr': 1,
'state': 'draft',
}
def state_draft(self, cr, uid, ids, context=None):
self.write(cr, uid, ids, {'state':'draft'})
return False
def check(self, cr, uid, ids, context=None):
todone = []
for sub in self.browse(cr, uid, ids, context=context):
ok = True
for line in sub.lines_id:
if not line.move_id.id:
ok = False
break
if ok:
todone.append(sub.id)
if todone:
self.write(cr, uid, todone, {'state':'done'})
return False
def remove_line(self, cr, uid, ids, context=None):
toremove = []
for sub in self.browse(cr, uid, ids, context=context):
for line in sub.lines_id:
if not line.move_id.id:
toremove.append(line.id)
if toremove:
self.pool.get('account.subscription.line').unlink(cr, uid, toremove)
self.write(cr, uid, ids, {'state':'draft'})
return False
def compute(self, cr, uid, ids, context=None):
for sub in self.browse(cr, uid, ids, context=context):
ds = sub.date_start
for i in range(sub.period_total):
self.pool.get('account.subscription.line').create(cr, uid, {
'date': ds,
'subscription_id': sub.id,
})
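                # Move the date forward by the subscription interval so the next line gets the following period's date.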
if sub.period_type=='day':
ds = (datetime.strptime(ds, '%Y-%m-%d') + relativedelta(days=sub.period_nbr)).strftime('%Y-%m-%d')
if sub.period_type=='month':
ds = (datetime.strptime(ds, '%Y-%m-%d') + relativedelta(months=sub.period_nbr)).strftime('%Y-%m-%d')
if sub.period_type=='year':
ds = (datetime.strptime(ds, '%Y-%m-%d') + relativedelta(years=sub.period_nbr)).strftime('%Y-%m-%d')
self.write(cr, uid, ids, {'state':'running'})
return True
class account_subscription_line(osv.osv):
_name = "account.subscription.line"
_description = "Account Subscription Line"
_columns = {
'subscription_id': fields.many2one('account.subscription', 'Subscription', required=True, select=True),
'date': fields.date('Date', required=True),
'move_id': fields.many2one('account.move', 'Entry'),
}
def move_create(self, cr, uid, ids, context=None):
tocheck = {}
all_moves = []
obj_model = self.pool.get('account.model')
for line in self.browse(cr, uid, ids, context=context):
data = {
'date': line.date,
}
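            # Generate the journal entry from the subscription's recurring model for this line's date.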
move_ids = obj_model.generate(cr, uid, [line.subscription_id.model_id.id], data, context)
tocheck[line.subscription_id.id] = True
self.write(cr, uid, [line.id], {'move_id':move_ids[0]})
all_moves.extend(move_ids)
if tocheck:
self.pool.get('account.subscription').check(cr, uid, tocheck.keys(), context)
return all_moves
_rec_name = 'date'
# ---------------------------------------------------------------
# Account Templates: Account, Tax, Tax Code and chart. + Wizard
# ---------------------------------------------------------------
class account_tax_template(osv.osv):
_name = 'account.tax.template'
class account_account_template(osv.osv):
_order = "code"
_name = "account.account.template"
_description ='Templates for Accounts'
_columns = {
'name': fields.char('Name', required=True, select=True),
'currency_id': fields.many2one('res.currency', 'Secondary Currency', help="Forces all moves for this account to have this secondary currency."),
'code': fields.char('Code', size=64, required=True, select=1),
'type': fields.selection([
('receivable','Receivable'),
('payable','Payable'),
('view','View'),
('consolidation','Consolidation'),
('liquidity','Liquidity'),
('other','Regular'),
('closed','Closed'),
], 'Internal Type', required=True,help="This type is used to differentiate types with "\
"special effects in Odoo: view can not have entries, consolidation are accounts that "\
"can have children accounts for multi-company consolidations, payable/receivable are for "\
"partners accounts (for debit/credit computations), closed for depreciated accounts."),
'user_type': fields.many2one('account.account.type', 'Account Type', required=True,
help="These types are defined according to your country. The type contains more information "\
"about the account and its specificities."),
'financial_report_ids': fields.many2many('account.financial.report', 'account_template_financial_report', 'account_template_id', 'report_line_id', 'Financial Reports'),
'reconcile': fields.boolean('Allow Reconciliation', help="Check this option if you want the user to reconcile entries in this account."),
'shortcut': fields.char('Shortcut', size=12),
'note': fields.text('Note'),
'parent_id': fields.many2one('account.account.template', 'Parent Account Template', ondelete='cascade', domain=[('type','=','view')]),
'child_parent_ids':fields.one2many('account.account.template', 'parent_id', 'Children'),
'tax_ids': fields.many2many('account.tax.template', 'account_account_template_tax_rel', 'account_id', 'tax_id', 'Default Taxes'),
'nocreate': fields.boolean('Optional create', help="If checked, the new chart of accounts will not contain this by default."),
'chart_template_id': fields.many2one('account.chart.template', 'Chart Template', help="This optional field allow you to link an account template to a specific chart template that may differ from the one its root parent belongs to. This allow you to define chart templates that extend another and complete it with few new accounts (You don't need to define the whole structure that is common to both several times)."),
}
_defaults = {
'reconcile': False,
'type': 'view',
'nocreate': False,
}
_check_recursion = check_cycle
_constraints = [
(_check_recursion, 'Error!\nYou cannot create recursive account templates.', ['parent_id']),
]
def name_get(self, cr, uid, ids, context=None):
if not ids:
return []
reads = self.read(cr, uid, ids, ['name','code'], context=context)
res = []
for record in reads:
name = record['name']
if record['code']:
name = record['code']+' '+name
res.append((record['id'],name ))
return res
def generate_account(self, cr, uid, chart_template_id, tax_template_ref, acc_template_ref, code_digits, company_id, context=None):
"""
        This method generates accounts from the account templates.
        :param chart_template_id: id of the chart template chosen in the wizard
        :param tax_template_ref: Taxes templates reference for writing taxes_id in account_account.
        :param acc_template_ref: dictionary with the mapping between the account templates and the real accounts.
        :param code_digits: number of digits got from wizard.multi.charts.accounts, this is used for account codes.
:param company_id: company_id selected from wizard.multi.charts.accounts.
:returns: return acc_template_ref for reference purpose.
:rtype: dict
"""
if context is None:
context = {}
obj_acc = self.pool.get('account.account')
company_name = self.pool.get('res.company').browse(cr, uid, company_id, context=context).name
template = self.pool.get('account.chart.template').browse(cr, uid, chart_template_id, context=context)
        #deactivate the parent_store functionality on account_account for performance reasons
ctx = context.copy()
ctx.update({'defer_parent_store_computation': True})
level_ref = {}
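        # Select the account templates to instantiate: those linked to this chart template, plus untied children of its root account.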
children_acc_criteria = [('chart_template_id','=', chart_template_id)]
if template.account_root_id.id:
children_acc_criteria = ['|'] + children_acc_criteria + ['&',('parent_id','child_of', [template.account_root_id.id]),('chart_template_id','=', False)]
children_acc_template = self.search(cr, uid, [('nocreate','!=',True)] + children_acc_criteria, order='id')
for account_template in self.browse(cr, uid, children_acc_template, context=context):
# skip the root of COA if it's not the main one
if (template.account_root_id.id == account_template.id) and template.parent_id:
continue
tax_ids = []
for tax in account_template.tax_ids:
tax_ids.append(tax_template_ref[tax.id])
code_main = account_template.code and len(account_template.code) or 0
code_acc = account_template.code or ''
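            # Pad non-view account codes with trailing zeros up to the requested number of digits.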
if code_main > 0 and code_main <= code_digits and account_template.type != 'view':
code_acc = str(code_acc) + (str('0'*(code_digits-code_main)))
parent_id = account_template.parent_id and ((account_template.parent_id.id in acc_template_ref) and acc_template_ref[account_template.parent_id.id]) or False
            #the level has to be given as well at creation time, because of the defer_parent_store_computation in
#context. Indeed because of this, the parent_left and parent_right are not computed and thus the child_of
#operator does not return the expected values, with result of having the level field not computed at all.
if parent_id:
level = parent_id in level_ref and level_ref[parent_id] + 1 or obj_acc._get_level(cr, uid, [parent_id], 'level', None, context=context)[parent_id] + 1
else:
level = 0
vals={
'name': (template.account_root_id.id == account_template.id) and company_name or account_template.name,
'currency_id': account_template.currency_id and account_template.currency_id.id or False,
'code': code_acc,
'type': account_template.type,
'user_type': account_template.user_type and account_template.user_type.id or False,
'reconcile': account_template.reconcile,
'shortcut': account_template.shortcut,
'note': account_template.note,
'financial_report_ids': account_template.financial_report_ids and [(6,0,[x.id for x in account_template.financial_report_ids])] or False,
'parent_id': parent_id,
'tax_ids': [(6,0,tax_ids)],
'company_id': company_id,
'level': level,
}
new_account = obj_acc.create(cr, uid, vals, context=ctx)
acc_template_ref[account_template.id] = new_account
level_ref[new_account] = level
        #reactivate the parent_store functionality on account_account
obj_acc._parent_store_compute(cr)
return acc_template_ref
class account_add_tmpl_wizard(osv.osv_memory):
"""Add one more account from the template.
With the 'nocreate' option, some accounts may not be created. Use this to add them later."""
_name = 'account.addtmpl.wizard'
def _get_def_cparent(self, cr, uid, context=None):
acc_obj = self.pool.get('account.account')
tmpl_obj = self.pool.get('account.account.template')
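        # Default parent: the real account whose code matches the parent of the chosen account template.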
tids = tmpl_obj.read(cr, uid, [context['tmpl_ids']], ['parent_id'])
if not tids or not tids[0]['parent_id']:
return False
ptids = tmpl_obj.read(cr, uid, [tids[0]['parent_id'][0]], ['code'])
res = None
if not ptids or not ptids[0]['code']:
raise osv.except_osv(_('Error!'), _('There is no parent code for the template account.'))
res = acc_obj.search(cr, uid, [('code','=',ptids[0]['code'])])
return res and res[0] or False
_columns = {
'cparent_id':fields.many2one('account.account', 'Parent target', help="Creates an account with the selected template under this existing parent.", required=True),
}
_defaults = {
'cparent_id': _get_def_cparent,
}
def action_create(self,cr,uid,ids,context=None):
if context is None:
context = {}
acc_obj = self.pool.get('account.account')
tmpl_obj = self.pool.get('account.account.template')
data = self.read(cr, uid, ids)[0]
company_id = acc_obj.read(cr, uid, [data['cparent_id'][0]], ['company_id'])[0]['company_id'][0]
account_template = tmpl_obj.browse(cr, uid, context['tmpl_ids'])
vals = {
'name': account_template.name,
'currency_id': account_template.currency_id and account_template.currency_id.id or False,
'code': account_template.code,
'type': account_template.type,
'user_type': account_template.user_type and account_template.user_type.id or False,
'reconcile': account_template.reconcile,
'shortcut': account_template.shortcut,
'note': account_template.note,
'parent_id': data['cparent_id'][0],
'company_id': company_id,
}
acc_obj.create(cr, uid, vals)
return {'type':'state', 'state': 'end' }
def action_cancel(self, cr, uid, ids, context=None):
return { 'type': 'state', 'state': 'end' }
class account_tax_code_template(osv.osv):
_name = 'account.tax.code.template'
_description = 'Tax Code Template'
_order = 'sequence, code'
_rec_name = 'code'
_columns = {
'name': fields.char('Tax Case Name', required=True),
'code': fields.char('Case Code', size=64),
'info': fields.text('Description'),
'parent_id': fields.many2one('account.tax.code.template', 'Parent Code', select=True),
'child_ids': fields.one2many('account.tax.code.template', 'parent_id', 'Child Codes'),
'sign': fields.float('Sign For Parent', required=True),
'notprintable':fields.boolean("Not Printable in Invoice", help="Check this box if you don't want any tax related to this tax Code to appear on invoices."),
'sequence': fields.integer(
'Sequence', help=(
"Determine the display order in the report 'Accounting "
"\ Reporting \ Generic Reporting \ Taxes \ Taxes Report'"),
),
}
_defaults = {
'sign': 1.0,
'notprintable': False,
}
def generate_tax_code(self, cr, uid, tax_code_root_id, company_id, context=None):
'''
This function generates the tax codes from the templates of tax code that are children of the given one passed
        in argument. Then it returns a dictionary with the mapping between the templates and the real objects.
:param tax_code_root_id: id of the root of all the tax code templates to process
:param company_id: id of the company the wizard is running for
        :returns: dictionary with the mapping between the templates and the real objects.
:rtype: dict
'''
obj_tax_code_template = self.pool.get('account.tax.code.template')
obj_tax_code = self.pool.get('account.tax.code')
tax_code_template_ref = {}
company = self.pool.get('res.company').browse(cr, uid, company_id, context=context)
#find all the children of the tax_code_root_id
children_tax_code_template = tax_code_root_id and obj_tax_code_template.search(cr, uid, [('parent_id','child_of',[tax_code_root_id])], order='id') or []
for tax_code_template in obj_tax_code_template.browse(cr, uid, children_tax_code_template, context=context):
vals = {
'name': (tax_code_root_id == tax_code_template.id) and company.name or tax_code_template.name,
'code': tax_code_template.code,
'info': tax_code_template.info,
'parent_id': tax_code_template.parent_id and ((tax_code_template.parent_id.id in tax_code_template_ref) and tax_code_template_ref[tax_code_template.parent_id.id]) or False,
'company_id': company_id,
'sign': tax_code_template.sign,
'sequence': tax_code_template.sequence,
}
#check if this tax code already exists
rec_list = obj_tax_code.search(cr, uid, [('name', '=', vals['name']),('code', '=', vals['code']),('company_id', '=', vals['company_id'])], context=context)
if not rec_list:
#if not yet, create it
new_tax_code = obj_tax_code.create(cr, uid, vals)
#recording the new tax code to do the mapping
tax_code_template_ref[tax_code_template.id] = new_tax_code
return tax_code_template_ref
def name_get(self, cr, uid, ids, context=None):
if not ids:
return []
if isinstance(ids, (int, long)):
ids = [ids]
reads = self.read(cr, uid, ids, ['name','code'], context=context, load='_classic_write')
return [(x['id'], (x['code'] and x['code'] + ' - ' or '') + x['name']) \
for x in reads]
_check_recursion = check_cycle
_constraints = [
(_check_recursion, 'Error!\nYou cannot create recursive Tax Codes.', ['parent_id'])
]
_order = 'code,name'
class account_chart_template(osv.osv):
_name="account.chart.template"
_description= "Templates for Account Chart"
_columns={
'name': fields.char('Name', required=True),
'parent_id': fields.many2one('account.chart.template', 'Parent Chart Template'),
'code_digits': fields.integer('# of Digits', required=True, help="No. of Digits to use for account code"),
        'visible': fields.boolean('Can be Visible?', help="Set this to False if you don't want this template to be used actively in the wizard that generates Chart of Accounts from templates; this is useful when you want to generate accounts of this template only when loading its child template."),
'currency_id': fields.many2one('res.currency', 'Currency'),
'complete_tax_set': fields.boolean('Complete Set of Taxes', help='This boolean helps you to choose if you want to propose to the user to encode the sale and purchase rates or choose from list of taxes. This last choice assumes that the set of tax defined on this template is complete'),
'account_root_id': fields.many2one('account.account.template', 'Root Account', domain=[('parent_id','=',False)]),
'tax_code_root_id': fields.many2one('account.tax.code.template', 'Root Tax Code', domain=[('parent_id','=',False)]),
'tax_template_ids': fields.one2many('account.tax.template', 'chart_template_id', 'Tax Template List', help='List of all the taxes that have to be installed by the wizard'),
'bank_account_view_id': fields.many2one('account.account.template', 'Bank Account'),
'property_account_receivable': fields.many2one('account.account.template', 'Receivable Account'),
'property_account_payable': fields.many2one('account.account.template', 'Payable Account'),
'property_account_expense_categ': fields.many2one('account.account.template', 'Expense Category Account'),
'property_account_income_categ': fields.many2one('account.account.template', 'Income Category Account'),
'property_account_expense': fields.many2one('account.account.template', 'Expense Account on Product Template'),
'property_account_income': fields.many2one('account.account.template', 'Income Account on Product Template'),
'property_account_income_opening': fields.many2one('account.account.template', 'Opening Entries Income Account'),
'property_account_expense_opening': fields.many2one('account.account.template', 'Opening Entries Expense Account'),
}
_defaults = {
'visible': True,
'code_digits': 6,
'complete_tax_set': True,
}
class account_tax_template(osv.osv):
_name = 'account.tax.template'
_description = 'Templates for Taxes'
_columns = {
'chart_template_id': fields.many2one('account.chart.template', 'Chart Template', required=True),
'name': fields.char('Tax Name', required=True),
'sequence': fields.integer('Sequence', required=True, help="The sequence field is used to order the taxes lines from lower sequences to higher ones. The order is important if you have a tax that has several tax children. In this case, the evaluation order is important."),
'amount': fields.float('Amount', required=True, digits_compute=get_precision_tax(), help="For Tax Type percent enter % ratio between 0-1."),
'type': fields.selection( [('percent','Percent'), ('fixed','Fixed'), ('none','None'), ('code','Python Code'), ('balance','Balance')], 'Tax Type', required=True),
'applicable_type': fields.selection( [('true','True'), ('code','Python Code')], 'Applicable Type', required=True, help="If not applicable (computed through a Python code), the tax won't appear on the invoice."),
'domain':fields.char('Domain', help="This field is only used if you develop your own module allowing developers to create specific taxes in a custom domain."),
'account_collected_id':fields.many2one('account.account.template', 'Invoice Tax Account'),
'account_paid_id':fields.many2one('account.account.template', 'Refund Tax Account'),
'parent_id':fields.many2one('account.tax.template', 'Parent Tax Account', select=True),
'child_depend':fields.boolean('Tax on Children', help="Set if the tax computation is based on the computation of child taxes rather than on the total amount."),
'python_compute':fields.text('Python Code'),
'python_compute_inv':fields.text('Python Code (reverse)'),
'python_applicable':fields.text('Applicable Code'),
#
# Fields used for the Tax declaration
#
'base_code_id': fields.many2one('account.tax.code.template', 'Base Code', help="Use this code for the tax declaration."),
'tax_code_id': fields.many2one('account.tax.code.template', 'Tax Code', help="Use this code for the tax declaration."),
'base_sign': fields.float('Base Code Sign', help="Usually 1 or -1."),
'tax_sign': fields.float('Tax Code Sign', help="Usually 1 or -1."),
# Same fields for refund invoices
'ref_base_code_id': fields.many2one('account.tax.code.template', 'Refund Base Code', help="Use this code for the tax declaration."),
'ref_tax_code_id': fields.many2one('account.tax.code.template', 'Refund Tax Code', help="Use this code for the tax declaration."),
'ref_base_sign': fields.float('Refund Base Code Sign', help="Usually 1 or -1."),
'ref_tax_sign': fields.float('Refund Tax Code Sign', help="Usually 1 or -1."),
'include_base_amount': fields.boolean('Include in Base Amount', help="Set if the amount of tax must be included in the base amount before computing the next taxes."),
'description': fields.char('Internal Name'),
'type_tax_use': fields.selection([('sale','Sale'),('purchase','Purchase'),('all','All')], 'Tax Use In', required=True,),
'price_include': fields.boolean('Tax Included in Price', help="Check this if the price you use on the product and invoices includes this tax."),
}
def name_get(self, cr, uid, ids, context=None):
if not ids:
return []
res = []
for record in self.read(cr, uid, ids, ['description','name'], context=context):
name = record['description'] and record['description'] or record['name']
res.append((record['id'],name ))
return res
def _default_company(self, cr, uid, context=None):
user = self.pool.get('res.users').browse(cr, uid, uid, context=context)
if user.company_id:
return user.company_id.id
return self.pool.get('res.company').search(cr, uid, [('parent_id', '=', False)])[0]
_defaults = {
'python_compute': lambda *a: '''# price_unit\n# product: product.product object or None\n# partner: res.partner object or None\n\nresult = price_unit * 0.10''',
'python_compute_inv': lambda *a: '''# price_unit\n# product: product.product object or False\n\nresult = price_unit * 0.10''',
'applicable_type': 'true',
'type': 'percent',
'amount': 0,
'sequence': 1,
'ref_tax_sign': 1,
'ref_base_sign': 1,
'tax_sign': 1,
'base_sign': 1,
'include_base_amount': False,
'type_tax_use': 'all',
'price_include': 0,
}
_order = 'sequence'
def _generate_tax(self, cr, uid, tax_templates, tax_code_template_ref, company_id, context=None):
"""
This method generate taxes from templates.
:param tax_templates: list of browse record of the tax templates to process
:param tax_code_template_ref: Taxcode templates reference.
:param company_id: id of the company the wizard is running for
:returns:
{
'tax_template_to_tax': mapping between tax template and the newly generated taxes corresponding,
'account_dict': dictionary containing a to-do list with all the accounts to assign on new taxes
}
"""
if context is None:
context = {}
res = {}
todo_dict = {}
tax_template_to_tax = {}
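        # Create each tax from its template; parent taxes and tax codes are resolved through the mappings built so far.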
for tax in tax_templates:
vals_tax = {
'name':tax.name,
'sequence': tax.sequence,
'amount': tax.amount,
'type': tax.type,
'applicable_type': tax.applicable_type,
'domain': tax.domain,
'parent_id': tax.parent_id and ((tax.parent_id.id in tax_template_to_tax) and tax_template_to_tax[tax.parent_id.id]) or False,
'child_depend': tax.child_depend,
'python_compute': tax.python_compute,
'python_compute_inv': tax.python_compute_inv,
'python_applicable': tax.python_applicable,
'base_code_id': tax.base_code_id and ((tax.base_code_id.id in tax_code_template_ref) and tax_code_template_ref[tax.base_code_id.id]) or False,
'tax_code_id': tax.tax_code_id and ((tax.tax_code_id.id in tax_code_template_ref) and tax_code_template_ref[tax.tax_code_id.id]) or False,
'base_sign': tax.base_sign,
'tax_sign': tax.tax_sign,
'ref_base_code_id': tax.ref_base_code_id and ((tax.ref_base_code_id.id in tax_code_template_ref) and tax_code_template_ref[tax.ref_base_code_id.id]) or False,
'ref_tax_code_id': tax.ref_tax_code_id and ((tax.ref_tax_code_id.id in tax_code_template_ref) and tax_code_template_ref[tax.ref_tax_code_id.id]) or False,
'ref_base_sign': tax.ref_base_sign,
'ref_tax_sign': tax.ref_tax_sign,
'include_base_amount': tax.include_base_amount,
'description': tax.description,
'company_id': company_id,
'type_tax_use': tax.type_tax_use,
'price_include': tax.price_include
}
new_tax = self.pool.get('account.tax').create(cr, uid, vals_tax)
tax_template_to_tax[tax.id] = new_tax
#as the accounts have not been created yet, we have to wait before filling these fields
todo_dict[new_tax] = {
'account_collected_id': tax.account_collected_id and tax.account_collected_id.id or False,
'account_paid_id': tax.account_paid_id and tax.account_paid_id.id or False,
}
res.update({'tax_template_to_tax': tax_template_to_tax, 'account_dict': todo_dict})
return res
# Fiscal Position Templates
class account_fiscal_position_template(osv.osv):
_name = 'account.fiscal.position.template'
_description = 'Template for Fiscal Position'
_columns = {
'name': fields.char('Fiscal Position Template', required=True),
'chart_template_id': fields.many2one('account.chart.template', 'Chart Template', required=True),
'account_ids': fields.one2many('account.fiscal.position.account.template', 'position_id', 'Account Mapping'),
'tax_ids': fields.one2many('account.fiscal.position.tax.template', 'position_id', 'Tax Mapping'),
'note': fields.text('Notes'),
}
def generate_fiscal_position(self, cr, uid, chart_temp_id, tax_template_ref, acc_template_ref, company_id, context=None):
"""
This method generate Fiscal Position, Fiscal Position Accounts and Fiscal Position Taxes from templates.
:param chart_temp_id: Chart Template Id.
:param taxes_ids: Taxes templates reference for generating account.fiscal.position.tax.
:param acc_template_ref: Account templates reference for generating account.fiscal.position.account.
:param company_id: company_id selected from wizard.multi.charts.accounts.
:returns: True
"""
if context is None:
context = {}
obj_tax_fp = self.pool.get('account.fiscal.position.tax')
obj_ac_fp = self.pool.get('account.fiscal.position.account')
obj_fiscal_position = self.pool.get('account.fiscal.position')
fp_ids = self.search(cr, uid, [('chart_template_id', '=', chart_temp_id)])
for position in self.browse(cr, uid, fp_ids, context=context):
new_fp = obj_fiscal_position.create(cr, uid, {'company_id': company_id, 'name': position.name, 'note': position.note})
for tax in position.tax_ids:
obj_tax_fp.create(cr, uid, {
'tax_src_id': tax_template_ref[tax.tax_src_id.id],
'tax_dest_id': tax.tax_dest_id and tax_template_ref[tax.tax_dest_id.id] or False,
'position_id': new_fp
})
for acc in position.account_ids:
obj_ac_fp.create(cr, uid, {
'account_src_id': acc_template_ref[acc.account_src_id.id],
'account_dest_id': acc_template_ref[acc.account_dest_id.id],
'position_id': new_fp
})
return True
class account_fiscal_position_tax_template(osv.osv):
_name = 'account.fiscal.position.tax.template'
_description = 'Template Tax Fiscal Position'
_rec_name = 'position_id'
_columns = {
'position_id': fields.many2one('account.fiscal.position.template', 'Fiscal Position', required=True, ondelete='cascade'),
'tax_src_id': fields.many2one('account.tax.template', 'Tax Source', required=True),
'tax_dest_id': fields.many2one('account.tax.template', 'Replacement Tax')
}
class account_fiscal_position_account_template(osv.osv):
_name = 'account.fiscal.position.account.template'
_description = 'Template Account Fiscal Mapping'
_rec_name = 'position_id'
_columns = {
'position_id': fields.many2one('account.fiscal.position.template', 'Fiscal Mapping', required=True, ondelete='cascade'),
'account_src_id': fields.many2one('account.account.template', 'Account Source', domain=[('type','<>','view')], required=True),
'account_dest_id': fields.many2one('account.account.template', 'Account Destination', domain=[('type','<>','view')], required=True)
}
# ---------------------------------------------------------
# Account generation from template wizards
# ---------------------------------------------------------
class wizard_multi_charts_accounts(osv.osv_memory):
"""
Create a new account chart for a company.
Wizards ask for:
* a company
* an account chart template
* a number of digits for formatting code of non-view accounts
* a list of bank accounts owned by the company
Then, the wizard:
* generates all accounts from the template and assigns them to the right company
* generates all taxes and tax codes, changing account assignations
* generates all accounting properties and assigns them correctly
"""
_name='wizard.multi.charts.accounts'
_inherit = 'res.config'
_columns = {
'company_id':fields.many2one('res.company', 'Company', required=True),
'currency_id': fields.many2one('res.currency', 'Currency', help="Currency as per company's country."),
'only_one_chart_template': fields.boolean('Only One Chart Template Available'),
'chart_template_id': fields.many2one('account.chart.template', 'Chart Template', required=True),
'bank_accounts_id': fields.one2many('account.bank.accounts.wizard', 'bank_account_id', 'Cash and Banks', required=True),
'code_digits':fields.integer('# of Digits', required=True, help="No. of Digits to use for account code"),
"sale_tax": fields.many2one("account.tax.template", "Default Sale Tax"),
"purchase_tax": fields.many2one("account.tax.template", "Default Purchase Tax"),
'sale_tax_rate': fields.float('Sales Tax(%)'),
'purchase_tax_rate': fields.float('Purchase Tax(%)'),
'complete_tax_set': fields.boolean('Complete Set of Taxes', help='This boolean helps you to choose if you want to propose to the user to encode the sales and purchase rates or use the usual m2o fields. This last choice assumes that the set of tax defined for the chosen template is complete'),
}
def _get_chart_parent_ids(self, cr, uid, chart_template, context=None):
""" Returns the IDs of all ancestor charts, including the chart itself.
(inverse of child_of operator)
:param browse_record chart_template: the account.chart.template record
:return: the IDS of all ancestor charts, including the chart itself.
"""
result = [chart_template.id]
while chart_template.parent_id:
chart_template = chart_template.parent_id
result.append(chart_template.id)
return result
def onchange_tax_rate(self, cr, uid, ids, rate=False, context=None):
return {'value': {'purchase_tax_rate': rate or False}}
def onchange_chart_template_id(self, cr, uid, ids, chart_template_id=False, context=None):
res = {}
tax_templ_obj = self.pool.get('account.tax.template')
res['value'] = {'complete_tax_set': False, 'sale_tax': False, 'purchase_tax': False}
if chart_template_id:
data = self.pool.get('account.chart.template').browse(cr, uid, chart_template_id, context=context)
currency_id = data.currency_id and data.currency_id.id or self.pool.get('res.users').browse(cr, uid, uid, context=context).company_id.currency_id.id
res['value'].update({'complete_tax_set': data.complete_tax_set, 'currency_id': currency_id})
if data.complete_tax_set:
                # default tax is given by the lowest sequence. For the same sequence we will take the latest created, as it will be the case for taxes created while installing the generic chart of accounts
chart_ids = self._get_chart_parent_ids(cr, uid, data, context=context)
base_tax_domain = [("chart_template_id", "in", chart_ids), ('parent_id', '=', False)]
sale_tax_domain = base_tax_domain + [('type_tax_use', 'in', ('sale','all'))]
purchase_tax_domain = base_tax_domain + [('type_tax_use', 'in', ('purchase','all'))]
sale_tax_ids = tax_templ_obj.search(cr, uid, sale_tax_domain, order="sequence, id desc")
purchase_tax_ids = tax_templ_obj.search(cr, uid, purchase_tax_domain, order="sequence, id desc")
res['value'].update({'sale_tax': sale_tax_ids and sale_tax_ids[0] or False,
'purchase_tax': purchase_tax_ids and purchase_tax_ids[0] or False})
res.setdefault('domain', {})
res['domain']['sale_tax'] = repr(sale_tax_domain)
res['domain']['purchase_tax'] = repr(purchase_tax_domain)
if data.code_digits:
res['value'].update({'code_digits': data.code_digits})
return res
def default_get(self, cr, uid, fields, context=None):
res = super(wizard_multi_charts_accounts, self).default_get(cr, uid, fields, context=context)
tax_templ_obj = self.pool.get('account.tax.template')
account_chart_template = self.pool['account.chart.template']
if 'bank_accounts_id' in fields:
res.update({'bank_accounts_id': [{'acc_name': _('Cash'), 'account_type': 'cash'},{'acc_name': _('Bank'), 'account_type': 'bank'}]})
if 'company_id' in fields:
res.update({'company_id': self.pool.get('res.users').browse(cr, uid, [uid], context=context)[0].company_id.id})
if 'currency_id' in fields:
company_id = res.get('company_id') or False
if company_id:
company_obj = self.pool.get('res.company')
country_id = company_obj.browse(cr, uid, company_id, context=context).country_id.id
currency_id = company_obj.on_change_country(cr, uid, company_id, country_id, context=context)['value']['currency_id']
res.update({'currency_id': currency_id})
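        # Pick a default chart template among the visible ones; a module passed via context overrides the most recently created one.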
ids = account_chart_template.search(cr, uid, [('visible', '=', True)], context=context)
if ids:
#in order to set default chart which was last created set max of ids.
chart_id = max(ids)
if context.get("default_charts"):
model_data = self.pool.get('ir.model.data').search_read(cr, uid, [('model','=','account.chart.template'),('module','=',context.get("default_charts"))], ['res_id'], context=context)
if model_data:
chart_id = model_data[0]['res_id']
chart = account_chart_template.browse(cr, uid, chart_id, context=context)
chart_hierarchy_ids = self._get_chart_parent_ids(cr, uid, chart, context=context)
if 'chart_template_id' in fields:
res.update({'only_one_chart_template': len(ids) == 1,
'chart_template_id': chart_id})
if 'sale_tax' in fields:
sale_tax_ids = tax_templ_obj.search(cr, uid, [("chart_template_id", "in", chart_hierarchy_ids),
('type_tax_use', 'in', ('sale','all'))],
order="sequence")
res.update({'sale_tax': sale_tax_ids and sale_tax_ids[0] or False})
if 'purchase_tax' in fields:
purchase_tax_ids = tax_templ_obj.search(cr, uid, [("chart_template_id", "in", chart_hierarchy_ids),
('type_tax_use', 'in', ('purchase','all'))],
order="sequence")
res.update({'purchase_tax': purchase_tax_ids and purchase_tax_ids[0] or False})
res.update({
'purchase_tax_rate': 15.0,
'sale_tax_rate': 15.0,
})
return res
def fields_view_get(self, cr, uid, view_id=None, view_type='form', context=None, toolbar=False, submenu=False):
        if context is None:
            context = {}
res = super(wizard_multi_charts_accounts, self).fields_view_get(cr, uid, view_id=view_id, view_type=view_type, context=context, toolbar=toolbar,submenu=False)
cmp_select = []
acc_template_obj = self.pool.get('account.chart.template')
company_obj = self.pool.get('res.company')
company_ids = company_obj.search(cr, uid, [], context=context)
#display in the widget selection of companies, only the companies that haven't been configured yet (but don't care about the demo chart of accounts)
cr.execute("SELECT company_id FROM account_account WHERE active = 't' AND account_account.parent_id IS NULL AND name != %s", ("Chart For Automated Tests",))
configured_cmp = [r[0] for r in cr.fetchall()]
unconfigured_cmp = list(set(company_ids)-set(configured_cmp))
for field in res['fields']:
if field == 'company_id':
res['fields'][field]['domain'] = [('id','in',unconfigured_cmp)]
res['fields'][field]['selection'] = [('', '')]
if unconfigured_cmp:
cmp_select = [(line.id, line.name) for line in company_obj.browse(cr, uid, unconfigured_cmp)]
res['fields'][field]['selection'] = cmp_select
return res
def check_created_journals(self, cr, uid, vals_journal, company_id, context=None):
"""
        This method checks whether the journal has already been created; if not, it creates a new one.
"""
obj_journal = self.pool.get('account.journal')
rec_list = obj_journal.search(cr, uid, [('name','=', vals_journal['name']),('company_id', '=', company_id)], context=context)
if not rec_list:
obj_journal.create(cr, uid, vals_journal, context=context)
return True
def generate_journals(self, cr, uid, chart_template_id, acc_template_ref, company_id, context=None):
"""
This method is used for creating journals.
:param chart_temp_id: Chart Template Id.
:param acc_template_ref: Account templates reference.
:param company_id: company_id selected from wizard.multi.charts.accounts.
:returns: True
"""
journal_data = self._prepare_all_journals(cr, uid, chart_template_id, acc_template_ref, company_id, context=context)
for vals_journal in journal_data:
self.check_created_journals(cr, uid, vals_journal, company_id, context=context)
return True
def _prepare_all_journals(self, cr, uid, chart_template_id, acc_template_ref, company_id, context=None):
def _get_analytic_journal(journal_type):
# Get the analytic journal
data = False
try:
if journal_type in ('sale', 'sale_refund'):
data = obj_data.get_object_reference(cr, uid, 'account', 'analytic_journal_sale')
elif journal_type in ('purchase', 'purchase_refund'):
data = obj_data.get_object_reference(cr, uid, 'account', 'exp')
elif journal_type == 'general':
pass
except ValueError:
pass
return data and data[1] or False
def _get_default_account(journal_type, type='debit'):
# Get the default accounts
default_account = False
if journal_type in ('sale', 'sale_refund'):
default_account = acc_template_ref.get(template.property_account_income_categ.id)
elif journal_type in ('purchase', 'purchase_refund'):
default_account = acc_template_ref.get(template.property_account_expense_categ.id)
elif journal_type == 'situation':
if type == 'debit':
default_account = acc_template_ref.get(template.property_account_expense_opening.id)
else:
default_account = acc_template_ref.get(template.property_account_income_opening.id)
return default_account
journal_names = {
'sale': _('Sales Journal'),
'purchase': _('Purchase Journal'),
'sale_refund': _('Sales Refund Journal'),
'purchase_refund': _('Purchase Refund Journal'),
'general': _('Miscellaneous Journal'),
'situation': _('Opening Entries Journal'),
}
journal_codes = {
'sale': _('SAJ'),
'purchase': _('EXJ'),
'sale_refund': _('SCNJ'),
'purchase_refund': _('ECNJ'),
'general': _('MISC'),
'situation': _('OPEJ'),
}
obj_data = self.pool.get('ir.model.data')
analytic_journal_obj = self.pool.get('account.analytic.journal')
template = self.pool.get('account.chart.template').browse(cr, uid, chart_template_id, context=context)
journal_data = []
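        # Build the values for one journal of each standard type, with its code, analytic journal and default accounts.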
for journal_type in ['sale', 'purchase', 'sale_refund', 'purchase_refund', 'general', 'situation']:
vals = {
'type': journal_type,
'name': journal_names[journal_type],
'code': journal_codes[journal_type],
'company_id': company_id,
'centralisation': journal_type == 'situation',
'analytic_journal_id': _get_analytic_journal(journal_type),
'default_credit_account_id': _get_default_account(journal_type, 'credit'),
'default_debit_account_id': _get_default_account(journal_type, 'debit'),
}
journal_data.append(vals)
return journal_data
def generate_properties(self, cr, uid, chart_template_id, acc_template_ref, company_id, context=None):
"""
        This method is used for creating properties.
:param chart_template_id: id of the current chart template for which we need to create properties
:param acc_template_ref: Mapping between ids of account templates and real accounts created from them
:param company_id: company_id selected from wizard.multi.charts.accounts.
:returns: True
"""
property_obj = self.pool.get('ir.property')
field_obj = self.pool.get('ir.model.fields')
todo_list = [
('property_account_receivable','res.partner','account.account'),
('property_account_payable','res.partner','account.account'),
('property_account_expense_categ','product.category','account.account'),
('property_account_income_categ','product.category','account.account'),
('property_account_expense','product.template','account.account'),
('property_account_income','product.template','account.account'),
]
template = self.pool.get('account.chart.template').browse(cr, uid, chart_template_id, context=context)
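        # Point each company-level default property to the real account generated from the corresponding template.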
for record in todo_list:
account = getattr(template, record[0])
value = account and 'account.account,' + str(acc_template_ref[account.id]) or False
if value:
field = field_obj.search(cr, uid, [('name', '=', record[0]),('model', '=', record[1]),('relation', '=', record[2])], context=context)
vals = {
'name': record[0],
'company_id': company_id,
'fields_id': field[0],
'value': value,
}
property_ids = property_obj.search(cr, uid, [('name','=', record[0]),('company_id', '=', company_id)], context=context)
if property_ids:
#the property exist: modify it
property_obj.write(cr, uid, property_ids, vals, context=context)
else:
#create the property
property_obj.create(cr, uid, vals, context=context)
return True
def _install_template(self, cr, uid, template_id, company_id, code_digits=None, obj_wizard=None, acc_ref=None, taxes_ref=None, tax_code_ref=None, context=None):
'''
This function recursively loads the template objects and create the real objects from them.
:param template_id: id of the chart template to load
:param company_id: id of the company the wizard is running for
:param code_digits: integer that depicts the number of digits the accounts code should have in the COA
:param obj_wizard: the current wizard for generating the COA from the templates
:param acc_ref: Mapping between ids of account templates and real accounts created from them
:param taxes_ref: Mapping between ids of tax templates and real taxes created from them
:param tax_code_ref: Mapping between ids of tax code templates and real tax codes created from them
:returns: return a tuple with a dictionary containing
* the mapping between the account template ids and the ids of the real accounts that have been generated
from them, as first item,
* a similar dictionary for mapping the tax templates and taxes, as second item,
            * a last identical dictionary containing the mapping of tax code templates and tax codes
:rtype: tuple(dict, dict, dict)
'''
if acc_ref is None:
acc_ref = {}
if taxes_ref is None:
taxes_ref = {}
if tax_code_ref is None:
tax_code_ref = {}
template = self.pool.get('account.chart.template').browse(cr, uid, template_id, context=context)
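        # Install parent chart templates first so that their accounts, taxes and tax codes can be referenced here.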
if template.parent_id:
tmp1, tmp2, tmp3 = self._install_template(cr, uid, template.parent_id.id, company_id, code_digits=code_digits, acc_ref=acc_ref, taxes_ref=taxes_ref, tax_code_ref=tax_code_ref, context=context)
acc_ref.update(tmp1)
taxes_ref.update(tmp2)
tax_code_ref.update(tmp3)
tmp1, tmp2, tmp3 = self._load_template(cr, uid, template_id, company_id, code_digits=code_digits, obj_wizard=obj_wizard, account_ref=acc_ref, taxes_ref=taxes_ref, tax_code_ref=tax_code_ref, context=context)
acc_ref.update(tmp1)
taxes_ref.update(tmp2)
tax_code_ref.update(tmp3)
return acc_ref, taxes_ref, tax_code_ref
def _load_template(self, cr, uid, template_id, company_id, code_digits=None, obj_wizard=None, account_ref=None, taxes_ref=None, tax_code_ref=None, context=None):
'''
This function generates all the objects from the templates
:param template_id: id of the chart template to load
:param company_id: id of the company the wizard is running for
:param code_digits: integer that depicts the number of digits the accounts code should have in the COA
:param obj_wizard: the current wizard for generating the COA from the templates
:param acc_ref: Mapping between ids of account templates and real accounts created from them
:param taxes_ref: Mapping between ids of tax templates and real taxes created from them
:param tax_code_ref: Mapping between ids of tax code templates and real tax codes created from them
:returns: return a tuple with a dictionary containing
* the mapping between the account template ids and the ids of the real accounts that have been generated
from them, as first item,
* a similar dictionary for mapping the tax templates and taxes, as second item,
            * a last identical dictionary containing the mapping of tax code templates and tax codes
:rtype: tuple(dict, dict, dict)
'''
if account_ref is None:
account_ref = {}
if taxes_ref is None:
taxes_ref = {}
if tax_code_ref is None:
tax_code_ref = {}
template = self.pool.get('account.chart.template').browse(cr, uid, template_id, context=context)
obj_tax_code_template = self.pool.get('account.tax.code.template')
obj_acc_tax = self.pool.get('account.tax')
obj_tax_temp = self.pool.get('account.tax.template')
obj_acc_template = self.pool.get('account.account.template')
obj_fiscal_position_template = self.pool.get('account.fiscal.position.template')
# create all the tax code.
tax_code_ref.update(obj_tax_code_template.generate_tax_code(cr, uid, template.tax_code_root_id.id, company_id, context=context))
# Generate taxes from templates.
tax_templates = [x for x in template.tax_template_ids]
generated_tax_res = obj_tax_temp._generate_tax(cr, uid, tax_templates, tax_code_ref, company_id, context=context)
taxes_ref.update(generated_tax_res['tax_template_to_tax'])
# Generating Accounts from templates.
account_template_ref = obj_acc_template.generate_account(cr, uid, template_id, taxes_ref, account_ref, code_digits, company_id, context=context)
account_ref.update(account_template_ref)
# writing account values on tax after creation of accounts
for key,value in generated_tax_res['account_dict'].items():
if value['account_collected_id'] or value['account_paid_id']:
obj_acc_tax.write(cr, uid, [key], {
'account_collected_id': account_ref.get(value['account_collected_id'], False),
'account_paid_id': account_ref.get(value['account_paid_id'], False),
})
# Create Journals
self.generate_journals(cr, uid, template_id, account_ref, company_id, context=context)
# generate properties function
self.generate_properties(cr, uid, template_id, account_ref, company_id, context=context)
# Generate Fiscal Position , Fiscal Position Accounts and Fiscal Position Taxes from templates
obj_fiscal_position_template.generate_fiscal_position(cr, uid, template_id, taxes_ref, account_ref, company_id, context=context)
return account_ref, taxes_ref, tax_code_ref
def _create_tax_templates_from_rates(self, cr, uid, obj_wizard, company_id, context=None):
'''
This function checks if the chosen chart template is configured as containing a full set of taxes, and if
        it's not the case, it creates the templates for account.tax.code and for account.account.tax objects according
to the provided sale/purchase rates. Then it saves the new tax templates as default taxes to use for this chart
template.
:param obj_wizard: browse record of wizard to generate COA from templates
        :param company_id: id of the company for which the wizard is running
:return: True
'''
obj_tax_code_template = self.pool.get('account.tax.code.template')
obj_tax_temp = self.pool.get('account.tax.template')
chart_template = obj_wizard.chart_template_id
vals = {}
all_parents = self._get_chart_parent_ids(cr, uid, chart_template, context=context)
# create tax templates and tax code templates from purchase_tax_rate and sale_tax_rate fields
if not chart_template.complete_tax_set:
value = obj_wizard.sale_tax_rate
ref_tax_ids = obj_tax_temp.search(cr, uid, [('type_tax_use','in', ('sale','all')), ('chart_template_id', 'in', all_parents)], context=context, order="sequence, id desc", limit=1)
obj_tax_temp.write(cr, uid, ref_tax_ids, {'amount': value/100.0, 'name': _('Tax %.2f%%') % value})
value = obj_wizard.purchase_tax_rate
ref_tax_ids = obj_tax_temp.search(cr, uid, [('type_tax_use','in', ('purchase','all')), ('chart_template_id', 'in', all_parents)], context=context, order="sequence, id desc", limit=1)
obj_tax_temp.write(cr, uid, ref_tax_ids, {'amount': value/100.0, 'name': _('Purchase Tax %.2f%%') % value})
return True
def execute(self, cr, uid, ids, context=None):
'''
This function is called at the confirmation of the wizard to generate the COA from the templates. It will read
all the provided information to create the accounts, the banks, the journals, the taxes, the tax codes, the
accounting properties... accordingly for the chosen company.
'''
if uid != SUPERUSER_ID and not self.pool['res.users'].has_group(cr, uid, 'base.group_erp_manager'):
raise openerp.exceptions.AccessError(_("Only administrators can change the settings"))
obj_data = self.pool.get('ir.model.data')
ir_values_obj = self.pool.get('ir.values')
obj_wizard = self.browse(cr, uid, ids[0])
company_id = obj_wizard.company_id.id
self.pool.get('res.company').write(cr, uid, [company_id], {'currency_id': obj_wizard.currency_id.id})
# When we install the CoA of first company, set the currency to price types and pricelists
if company_id==1:
for ref in (('product','list_price'),('product','standard_price'),('product','list0'),('purchase','list0')):
try:
tmp2 = obj_data.get_object_reference(cr, uid, *ref)
if tmp2:
self.pool[tmp2[0]].write(cr, uid, tmp2[1], {
'currency_id': obj_wizard.currency_id.id
})
except ValueError:
pass
# If the floats for sale/purchase rates have been filled, create templates from them
self._create_tax_templates_from_rates(cr, uid, obj_wizard, company_id, context=context)
# Install all the templates objects and generate the real objects
acc_template_ref, taxes_ref, tax_code_ref = self._install_template(cr, uid, obj_wizard.chart_template_id.id, company_id, code_digits=obj_wizard.code_digits, obj_wizard=obj_wizard, context=context)
# write values of default taxes for product as super user
if obj_wizard.sale_tax and taxes_ref:
ir_values_obj.set_default(cr, SUPERUSER_ID, 'product.template', "taxes_id", [taxes_ref[obj_wizard.sale_tax.id]], for_all_users=True, company_id=company_id)
if obj_wizard.purchase_tax and taxes_ref:
ir_values_obj.set_default(cr, SUPERUSER_ID, 'product.template', "supplier_taxes_id", [taxes_ref[obj_wizard.purchase_tax.id]], for_all_users=True, company_id=company_id)
# Create Bank journals
self._create_bank_journals_from_o2m(cr, uid, obj_wizard, company_id, acc_template_ref, context=context)
return {}
def _prepare_bank_journal(self, cr, uid, line, current_num, default_account_id, company_id, context=None):
'''
This function prepares the value to use for the creation of a bank journal created through the wizard of
generating COA from templates.
:param line: dictionary containing the values encoded by the user related to his bank account
:param current_num: integer corresponding to a counter of the already created bank journals through this wizard.
        :param default_account_id: id of the default debit/credit account created before for this journal.
:param company_id: id of the company for which the wizard is running
:return: mapping of field names and values
:rtype: dict
'''
obj_data = self.pool.get('ir.model.data')
obj_journal = self.pool.get('account.journal')
# we need to loop again to find next number for journal code
# because we can't rely on the value current_num as,
        # it's possible that we already have bank journals created (e.g. by the creation of res.partner.bank)
# and the next number for account code might have been already used before for journal
for num in xrange(current_num, 100):
# journal_code has a maximal size of 5, hence we can enforce the boundary num < 100
journal_code = _('BNK')[:3] + str(num)
ids = obj_journal.search(cr, uid, [('code', '=', journal_code), ('company_id', '=', company_id)], context=context)
if not ids:
break
else:
raise osv.except_osv(_('Error!'), _('Cannot generate an unused journal code.'))
vals = {
'name': line['acc_name'],
'code': journal_code,
'type': line['account_type'] == 'cash' and 'cash' or 'bank',
'company_id': company_id,
'analytic_journal_id': False,
'currency': False,
'default_credit_account_id': default_account_id,
'default_debit_account_id': default_account_id,
}
if line['currency_id']:
vals['currency'] = line['currency_id']
return vals
def _prepare_bank_account(self, cr, uid, line, new_code, acc_template_ref, ref_acc_bank, company_id, context=None):
'''
This function prepares the value to use for the creation of the default debit and credit accounts of a
bank journal created through the wizard of generating COA from templates.
:param line: dictionary containing the values encoded by the user related to his bank account
:param new_code: integer corresponding to the next available number to use as account code
:param acc_template_ref: the dictionary containing the mapping between the ids of account templates and the ids
of the accounts that have been generated from them.
:param ref_acc_bank: browse record of the account template set as root of all bank accounts for the chosen
template
:param company_id: id of the company for which the wizard is running
:return: mapping of field names and values
:rtype: dict
'''
obj_data = self.pool.get('ir.model.data')
        # Get the id of the user types for cash and bank
tmp = obj_data.get_object_reference(cr, uid, 'account', 'data_account_type_cash')
cash_type = tmp and tmp[1] or False
tmp = obj_data.get_object_reference(cr, uid, 'account', 'data_account_type_bank')
bank_type = tmp and tmp[1] or False
return {
'name': line['acc_name'],
'currency_id': line['currency_id'],
'code': new_code,
'type': 'liquidity',
'user_type': line['account_type'] == 'cash' and cash_type or bank_type,
'parent_id': acc_template_ref[ref_acc_bank.id] or False,
'company_id': company_id,
}
def _create_bank_journals_from_o2m(self, cr, uid, obj_wizard, company_id, acc_template_ref, context=None):
'''
This function creates bank journals and its accounts for each line encoded in the field bank_accounts_id of the
wizard.
:param obj_wizard: the current wizard that generates the COA from the templates.
:param company_id: the id of the company for which the wizard is running.
:param acc_template_ref: the dictionary containing the mapping between the ids of account templates and the ids
of the accounts that have been generated from them.
:return: True
'''
obj_acc = self.pool.get('account.account')
obj_journal = self.pool.get('account.journal')
code_digits = obj_wizard.code_digits
# Build a list with all the data to process
journal_data = []
if obj_wizard.bank_accounts_id:
for acc in obj_wizard.bank_accounts_id:
vals = {
'acc_name': acc.acc_name,
'account_type': acc.account_type,
'currency_id': acc.currency_id.id,
}
journal_data.append(vals)
ref_acc_bank = obj_wizard.chart_template_id.bank_account_view_id
if journal_data and not ref_acc_bank.code:
raise osv.except_osv(_('Configuration Error!'), _('You have to set a code for the bank account defined on the selected chart of accounts.'))
current_num = 1
for line in journal_data:
# Seek the next available number for the account code
while True:
new_code = str(ref_acc_bank.code.ljust(code_digits-len(str(current_num)), '0')) + str(current_num)
ids = obj_acc.search(cr, uid, [('code', '=', new_code), ('company_id', '=', company_id)])
if not ids:
break
else:
current_num += 1
# Create the default debit/credit accounts for this bank journal
vals = self._prepare_bank_account(cr, uid, line, new_code, acc_template_ref, ref_acc_bank, company_id, context=context)
default_account_id = obj_acc.create(cr, uid, vals, context=context)
#create the bank journal
vals_journal = self._prepare_bank_journal(cr, uid, line, current_num, default_account_id, company_id, context=context)
obj_journal.create(cr, uid, vals_journal)
current_num += 1
return True
class account_bank_accounts_wizard(osv.osv_memory):
_name='account.bank.accounts.wizard'
_columns = {
'acc_name': fields.char('Account Name.', required=True),
'bank_account_id': fields.many2one('wizard.multi.charts.accounts', 'Bank Account', required=True, ondelete='cascade'),
'currency_id': fields.many2one('res.currency', 'Secondary Currency', help="Forces all moves for this account to have this secondary currency."),
'account_type': fields.selection([('cash','Cash'), ('check','Check'), ('bank','Bank')], 'Account Type'),
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 | 744,763,512,084,534,700 | 52.459467 | 425 | 0.583011 | false |
hexlism/xx_net | gae_proxy/server/lib/google/appengine/tools/dev_appserver_import_hook.py | 5 | 45469 | #!/usr/bin/env python
#
# Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Import hook for dev_appserver.py."""
import dummy_thread
import errno
import imp
import itertools
import locale
import logging
import mimetypes
import os
import pickle
import random
import re
import select
import socket
import sys
import urllib
try:
import distutils.util
except ImportError:
pass
from google.appengine import dist
SITE_PACKAGES = os.path.normcase(os.path.join(os.path.dirname(os.__file__),
'site-packages'))
import google
SDK_ROOT = os.path.dirname(os.path.dirname(google.__file__))
CODING_COOKIE_RE = re.compile("coding[:=]\s*([-\w.]+)")
DEFAULT_ENCODING = 'ascii'
def FakeURandom(n):
"""Fake version of os.urandom."""
bytes = ''
for _ in range(n):
bytes += chr(random.randint(0, 255))
return bytes
def FakeUname():
"""Fake version of os.uname."""
return ('Linux', '', '', '', '')
def FakeUnlink(path):
"""Fake version of os.unlink."""
if os.path.isdir(path):
raise OSError(errno.ENOENT, "Is a directory", path)
else:
raise OSError(errno.EPERM, "Operation not permitted", path)
def FakeReadlink(path):
"""Fake version of os.readlink."""
raise OSError(errno.EINVAL, "Invalid argument", path)
def FakeAccess(path, mode):
"""Fake version of os.access where only reads are supported."""
if not os.path.exists(path) or mode != os.R_OK:
return False
else:
return True
def FakeSetLocale(category, value=None, original_setlocale=locale.setlocale):
"""Fake version of locale.setlocale that only supports the default."""
if value not in (None, '', 'C', 'POSIX'):
raise locale.Error('locale emulation only supports "C" locale')
return original_setlocale(category, 'C')
def FakeOpen(filename, flags, mode=0777):
"""Fake version of os.open."""
raise OSError(errno.EPERM, "Operation not permitted", filename)
def FakeRename(src, dst):
"""Fake version of os.rename."""
raise OSError(errno.EPERM, "Operation not permitted", src)
def FakeUTime(path, times):
"""Fake version of os.utime."""
raise OSError(errno.EPERM, "Operation not permitted", path)
def FakeFileObject(fp, mode='rb', bufsize=-1, close=False):
"""Assuming that the argument is a StringIO or file instance."""
if not hasattr(fp, 'fileno'):
fp.fileno = lambda: None
return fp
def FakeGetHostByAddr(addr):
"""Fake version of socket.gethostbyaddr."""
raise NotImplementedError()
def FakeGetProtoByName(protocolname):
"""Fake version of socket.getprotobyname."""
raise NotImplementedError()
def FakeGetServByPort(portnumber, protocolname=None):
"""Fake version of socket.getservbyport."""
raise NotImplementedError()
def FakeGetNameInfo(sockaddr, flags):
"""Fake version of socket.getnameinfo."""
raise NotImplementedError()
def FakeSocketRecvInto(buf, nbytes=0, flags=0):
"""Fake version of socket.socket.recvinto."""
raise NotImplementedError()
def FakeSocketRecvFromInto(buffer, nbytes=0, flags=0):
"""Fake version of socket.socket.recvfrom_into."""
raise NotImplementedError()
def FakeGetPlatform():
"""Fake distutils.util.get_platform on OS/X. Pass-through otherwise."""
if sys.platform == 'darwin':
return 'macosx-'
else:
return distutils.util.get_platform()
def NeedsMacOSXProxyFakes():
"""Returns True if the MacOS X urllib fakes should be installed."""
return (sys.platform == 'darwin' and
(2, 6, 0) <= sys.version_info < (2, 6, 4))
if NeedsMacOSXProxyFakes():
def _FakeProxyBypassHelper(fn,
original_module_dict=sys.modules.copy(),
original_uname=os.uname):
"""Setups and restores the state for the Mac OS X urllib fakes."""
def Inner(*args, **kwargs):
current_uname = os.uname
current_meta_path = sys.meta_path[:]
current_modules = sys.modules.copy()
try:
sys.modules.clear()
sys.modules.update(original_module_dict)
sys.meta_path[:] = []
os.uname = original_uname
return fn(*args, **kwargs)
finally:
sys.modules.clear()
sys.modules.update(current_modules)
os.uname = current_uname
sys.meta_path[:] = current_meta_path
return Inner
@_FakeProxyBypassHelper
def FakeProxyBypassMacOSXSysconf(
host,
original_proxy_bypass_macosx_sysconf=urllib.proxy_bypass_macosx_sysconf):
"""Fake for urllib.proxy_bypass_macosx_sysconf for Python 2.6.0 to 2.6.3."""
return original_proxy_bypass_macosx_sysconf(host)
@_FakeProxyBypassHelper
def FakeGetProxiesMacOSXSysconf(
original_getproxies_macosx_sysconf=urllib.getproxies_macosx_sysconf):
"""Fake for urllib.getproxies_macosx_sysconf for Python 2.6.0 to 2.6.3."""
return original_getproxies_macosx_sysconf()
def IsPathInSubdirectories(filename,
subdirectories,
normcase=os.path.normcase):
"""Determines if a filename is contained within one of a set of directories.
Args:
filename: Path of the file (relative or absolute).
subdirectories: Iterable collection of paths to subdirectories which the
given filename may be under.
normcase: Used for dependency injection.
Returns:
True if the supplied filename is in one of the given sub-directories or
its hierarchy of children. False otherwise.
"""
file_dir = normcase(os.path.dirname(os.path.abspath(filename)))
for parent in subdirectories:
fixed_parent = normcase(os.path.abspath(parent))
if os.path.commonprefix([file_dir, fixed_parent]) == fixed_parent:
return True
return False
def GeneratePythonPaths(*p):
"""Generate all valid filenames for the given file.
Args:
p: Positional args are the folders to the file and finally the file
without a suffix.
Returns:
A list of strings representing the given path to a file with each valid
suffix for this python build.
"""
suffixes = imp.get_suffixes()
return [os.path.join(*p) + s for s, m, t in suffixes]
class FakeFile(file):
"""File sub-class that enforces the security restrictions of the production
environment.
"""
ALLOWED_MODES = frozenset(['r', 'rb', 'U', 'rU'])
ALLOWED_FILES = set(os.path.normcase(filename)
for filename in mimetypes.knownfiles
if os.path.isfile(filename))
ALLOWED_DIRS = set([
os.path.normcase(os.path.realpath(os.path.dirname(os.__file__))),
os.path.normcase(os.path.abspath(os.path.dirname(os.__file__))),
os.path.normcase(os.path.dirname(os.path.realpath(os.__file__))),
os.path.normcase(os.path.dirname(os.path.abspath(os.__file__))),
])
NOT_ALLOWED_DIRS = set([
SITE_PACKAGES,
])
ALLOWED_SITE_PACKAGE_DIRS = set(
os.path.normcase(os.path.abspath(os.path.join(SITE_PACKAGES, path)))
for path in [
])
ALLOWED_SITE_PACKAGE_FILES = set(
os.path.normcase(os.path.abspath(os.path.join(
os.path.dirname(os.__file__), 'site-packages', path)))
for path in itertools.chain(*[
[os.path.join('Crypto')],
GeneratePythonPaths('Crypto', '__init__'),
[os.path.join('Crypto', 'Cipher')],
GeneratePythonPaths('Crypto', 'Cipher', '__init__'),
GeneratePythonPaths('Crypto', 'Cipher', 'AES'),
GeneratePythonPaths('Crypto', 'Cipher', 'ARC2'),
GeneratePythonPaths('Crypto', 'Cipher', 'ARC4'),
GeneratePythonPaths('Crypto', 'Cipher', 'Blowfish'),
GeneratePythonPaths('Crypto', 'Cipher', 'CAST'),
GeneratePythonPaths('Crypto', 'Cipher', 'DES'),
GeneratePythonPaths('Crypto', 'Cipher', 'DES3'),
GeneratePythonPaths('Crypto', 'Cipher', 'XOR'),
[os.path.join('Crypto', 'Hash')],
GeneratePythonPaths('Crypto', 'Hash', '__init__'),
GeneratePythonPaths('Crypto', 'Hash', 'HMAC'),
os.path.join('Crypto', 'Hash', 'MD2'),
os.path.join('Crypto', 'Hash', 'MD4'),
GeneratePythonPaths('Crypto', 'Hash', 'MD5'),
GeneratePythonPaths('Crypto', 'Hash', 'SHA'),
os.path.join('Crypto', 'Hash', 'SHA256'),
os.path.join('Crypto', 'Hash', 'RIPEMD'),
[os.path.join('Crypto', 'Protocol')],
GeneratePythonPaths('Crypto', 'Protocol', '__init__'),
GeneratePythonPaths('Crypto', 'Protocol', 'AllOrNothing'),
GeneratePythonPaths('Crypto', 'Protocol', 'Chaffing'),
[os.path.join('Crypto', 'PublicKey')],
GeneratePythonPaths('Crypto', 'PublicKey', '__init__'),
GeneratePythonPaths('Crypto', 'PublicKey', 'DSA'),
GeneratePythonPaths('Crypto', 'PublicKey', 'ElGamal'),
GeneratePythonPaths('Crypto', 'PublicKey', 'RSA'),
GeneratePythonPaths('Crypto', 'PublicKey', 'pubkey'),
GeneratePythonPaths('Crypto', 'PublicKey', 'qNEW'),
[os.path.join('Crypto', 'Util')],
GeneratePythonPaths('Crypto', 'Util', '__init__'),
GeneratePythonPaths('Crypto', 'Util', 'RFC1751'),
GeneratePythonPaths('Crypto', 'Util', 'number'),
GeneratePythonPaths('Crypto', 'Util', 'randpool'),
]))
_original_file = file
_root_path = None
_application_paths = None
_skip_files = None
_static_file_config_matcher = None
_allow_skipped_files = True
_availability_cache = {}
@staticmethod
def SetAllowedPaths(root_path, application_paths):
"""Configures which paths are allowed to be accessed.
Must be called at least once before any file objects are created in the
hardened environment.
Args:
root_path: Absolute path to the root of the application.
application_paths: List of additional paths that the application may
access, this must include the App Engine runtime but
not the Python library directories.
"""
FakeFile._application_paths = (set(os.path.realpath(path)
for path in application_paths) |
set(os.path.abspath(path)
for path in application_paths))
FakeFile._application_paths.add(root_path)
FakeFile._root_path = os.path.join(root_path, '')
FakeFile._availability_cache = {}
@staticmethod
def SetAllowSkippedFiles(allow_skipped_files):
"""Configures access to files matching FakeFile._skip_files.
Args:
allow_skipped_files: Boolean whether to allow access to skipped files
"""
FakeFile._allow_skipped_files = allow_skipped_files
FakeFile._availability_cache = {}
@staticmethod
def SetAllowedModule(name):
"""Allow the use of a module based on where it is located.
Meant to be used by use_library() so that it has a link back into the
trusted part of the interpreter.
Args:
name: Name of the module to allow.
"""
stream, pathname, description = imp.find_module(name)
pathname = os.path.normcase(os.path.abspath(pathname))
if stream:
stream.close()
FakeFile.ALLOWED_FILES.add(pathname)
FakeFile.ALLOWED_FILES.add(os.path.realpath(pathname))
else:
assert description[2] == imp.PKG_DIRECTORY
if pathname.startswith(SITE_PACKAGES):
FakeFile.ALLOWED_SITE_PACKAGE_DIRS.add(pathname)
FakeFile.ALLOWED_SITE_PACKAGE_DIRS.add(os.path.realpath(pathname))
else:
FakeFile.ALLOWED_DIRS.add(pathname)
FakeFile.ALLOWED_DIRS.add(os.path.realpath(pathname))
@staticmethod
def SetSkippedFiles(skip_files):
"""Sets which files in the application directory are to be ignored.
Must be called at least once before any file objects are created in the
hardened environment.
Must be called whenever the configuration was updated.
Args:
skip_files: Object with .match() method (e.g. compiled regexp).
"""
FakeFile._skip_files = skip_files
FakeFile._availability_cache = {}
@staticmethod
def SetStaticFileConfigMatcher(static_file_config_matcher):
"""Sets StaticFileConfigMatcher instance for checking if a file is static.
Must be called at least once before any file objects are created in the
hardened environment.
Must be called whenever the configuration was updated.
Args:
static_file_config_matcher: StaticFileConfigMatcher instance.
"""
FakeFile._static_file_config_matcher = static_file_config_matcher
FakeFile._availability_cache = {}
@staticmethod
def IsFileAccessible(filename, normcase=os.path.normcase):
"""Determines if a file's path is accessible.
SetAllowedPaths(), SetSkippedFiles() and SetStaticFileConfigMatcher() must
be called before this method or else all file accesses will raise an error.
Args:
filename: Path of the file to check (relative or absolute). May be a
directory, in which case access for files inside that directory will
be checked.
normcase: Used for dependency injection.
Returns:
True if the file is accessible, False otherwise.
"""
logical_filename = normcase(os.path.abspath(filename))
result = FakeFile._availability_cache.get(logical_filename)
if result is None:
result = FakeFile._IsFileAccessibleNoCache(logical_filename,
normcase=normcase)
FakeFile._availability_cache[logical_filename] = result
return result
@staticmethod
def _IsFileAccessibleNoCache(logical_filename, normcase=os.path.normcase):
"""Determines if a file's path is accessible.
This is an internal part of the IsFileAccessible implementation.
Args:
logical_filename: Absolute path of the file to check.
normcase: Used for dependency injection.
Returns:
True if the file is accessible, False otherwise.
"""
logical_dirfakefile = logical_filename
if os.path.isdir(logical_filename):
logical_dirfakefile = os.path.join(logical_filename, 'foo')
if IsPathInSubdirectories(logical_dirfakefile, [FakeFile._root_path],
normcase=normcase):
relative_filename = logical_dirfakefile[len(FakeFile._root_path):]
if not FakeFile._allow_skipped_files:
path = relative_filename
while path != os.path.dirname(path):
if FakeFile._skip_files.match(path):
logging.warning('Blocking access to skipped file "%s"',
logical_filename)
return False
path = os.path.dirname(path)
if FakeFile._static_file_config_matcher.IsStaticFile(relative_filename):
logging.warning('Blocking access to static file "%s"',
logical_filename)
return False
if logical_filename in FakeFile.ALLOWED_FILES:
return True
if logical_filename in FakeFile.ALLOWED_SITE_PACKAGE_FILES:
return True
if IsPathInSubdirectories(logical_dirfakefile,
FakeFile.ALLOWED_SITE_PACKAGE_DIRS,
normcase=normcase):
return True
allowed_dirs = FakeFile._application_paths | FakeFile.ALLOWED_DIRS
if (IsPathInSubdirectories(logical_dirfakefile,
allowed_dirs,
normcase=normcase) and
not IsPathInSubdirectories(logical_dirfakefile,
FakeFile.NOT_ALLOWED_DIRS,
normcase=normcase)):
return True
return False
def __init__(self, filename, mode='r', bufsize=-1, **kwargs):
"""Initializer. See file built-in documentation."""
if mode not in FakeFile.ALLOWED_MODES:
raise IOError('invalid mode: %s' % mode)
if not FakeFile.IsFileAccessible(filename):
raise IOError(errno.EACCES, 'file not accessible', filename)
super(FakeFile, self).__init__(filename, mode, bufsize, **kwargs)
dist._library.SetAllowedModule = FakeFile.SetAllowedModule
class RestrictedPathFunction(object):
"""Enforces access restrictions for functions that have a file or
directory path as their first argument."""
_original_os = os
def __init__(self, original_func):
"""Initializer.
Args:
original_func: Callable that takes as its first argument the path to a
file or directory on disk; all subsequent arguments may be variable.
"""
self._original_func = original_func
def __call__(self, path, *args, **kwargs):
"""Enforces access permissions for the function passed to the constructor.
"""
if not FakeFile.IsFileAccessible(path):
raise OSError(errno.EACCES, 'path not accessible', path)
return self._original_func(path, *args, **kwargs)
def GetSubmoduleName(fullname):
"""Determines the leaf submodule name of a full module name.
Args:
fullname: Fully qualified module name, e.g. 'foo.bar.baz'
Returns:
Submodule name, e.g. 'baz'. If the supplied module has no submodule (e.g.,
'stuff'), the returned value will just be that module name ('stuff').
"""
return fullname.rsplit('.', 1)[-1]
class CouldNotFindModuleError(ImportError):
"""Raised when a module could not be found.
In contrast to when a module has been found, but cannot be loaded because of
hardening restrictions.
"""
def Trace(func):
"""Call stack logging decorator for HardenedModulesHook class.
This decorator logs the call stack of the HardenedModulesHook class as
it executes, indenting logging messages based on the current stack depth.
Args:
func: the function to decorate.
Returns:
The decorated function.
"""
def Decorate(self, *args, **kwargs):
args_to_show = []
if args is not None:
args_to_show.extend(str(argument) for argument in args)
if kwargs is not None:
args_to_show.extend('%s=%s' % (key, value)
for key, value in kwargs.iteritems())
args_string = ', '.join(args_to_show)
self.log('Entering %s(%s)', func.func_name, args_string)
self._indent_level += 1
try:
return func(self, *args, **kwargs)
finally:
self._indent_level -= 1
self.log('Exiting %s(%s)', func.func_name, args_string)
return Decorate
class HardenedModulesHook(object):
"""Meta import hook that restricts the modules used by applications to match
the production environment.
Module controls supported:
- Disallow native/extension modules from being loaded
- Disallow built-in and/or Python-distributed modules from being loaded
- Replace modules with completely empty modules
- Override specific module attributes
- Replace one module with another
After creation, this object should be added to the front of the sys.meta_path
list (which may need to be created). The sys.path_importer_cache dictionary
should also be cleared, to prevent loading any non-restricted modules.
See PEP302 for more info on how this works:
http://www.python.org/dev/peps/pep-0302/
"""
ENABLE_LOGGING = False
def log(self, message, *args):
"""Logs an import-related message to stderr, with indentation based on
current call-stack depth.
Args:
message: Logging format string.
args: Positional format parameters for the logging message.
"""
if HardenedModulesHook.ENABLE_LOGGING:
indent = self._indent_level * ' '
print >>sys.__stderr__, indent + (message % args)
_WHITE_LIST_C_MODULES = [
'py_streamhtmlparser',
'AES',
'ARC2',
'ARC4',
'Blowfish',
'CAST',
'DES',
'DES3',
'MD2',
'MD4',
'RIPEMD',
'SHA256',
'XOR',
'_Crypto_Cipher__AES',
'_Crypto_Cipher__ARC2',
'_Crypto_Cipher__ARC4',
'_Crypto_Cipher__Blowfish',
'_Crypto_Cipher__CAST',
'_Crypto_Cipher__DES',
'_Crypto_Cipher__DES3',
'_Crypto_Cipher__XOR',
'_Crypto_Hash__MD2',
'_Crypto_Hash__MD4',
'_Crypto_Hash__RIPEMD',
'_Crypto_Hash__SHA256',
'array',
'binascii',
'bz2',
'cmath',
'collections',
'crypt',
'cStringIO',
'datetime',
'errno',
'exceptions',
'gc',
'itertools',
'math',
'md5',
'operator',
'posix',
'posixpath',
'pyexpat',
'sha',
'struct',
'sys',
'time',
'timing',
'unicodedata',
'zlib',
'_ast',
'_bisect',
'_codecs',
'_codecs_cn',
'_codecs_hk',
'_codecs_iso2022',
'_codecs_jp',
'_codecs_kr',
'_codecs_tw',
'_collections',
'_csv',
'_elementtree',
'_functools',
'_hashlib',
'_heapq',
'_locale',
'_lsprof',
'_md5',
'_multibytecodec',
'_scproxy',
'_random',
'_sha',
'_sha256',
'_sha512',
'_sre',
'_struct',
'_types',
'_weakref',
'__main__',
]
_PY27_ALLOWED_MODULES = [
'_bytesio',
'_fileio',
'_io',
'_json',
'_symtable',
'_yaml',
'parser',
'strop',
]
__PY27_OPTIONAL_ALLOWED_MODULES = {
'django': [],
'jinja2': ['_speedups'],
'lxml': ['etree', 'objectify'],
'markupsafe': ['_speedups'],
'numpy': [
'_capi',
'_compiled_base',
'_dotblas',
'fftpack_lite',
'lapack_lite',
'mtrand',
'multiarray',
'scalarmath',
'_sort',
'umath',
'umath_tests',
],
'PIL': ['_imaging', '_imagingcms', '_imagingft', '_imagingmath'],
'setuptools': [],
}
__CRYPTO_CIPHER_ALLOWED_MODULES = [
'MODE_CBC',
'MODE_CFB',
'MODE_CTR',
'MODE_ECB',
'MODE_OFB',
'block_size',
'key_size',
'new',
]
_WHITE_LIST_PARTIAL_MODULES = {
'Crypto.Cipher.AES': __CRYPTO_CIPHER_ALLOWED_MODULES,
'Crypto.Cipher.ARC2': __CRYPTO_CIPHER_ALLOWED_MODULES,
'Crypto.Cipher.Blowfish': __CRYPTO_CIPHER_ALLOWED_MODULES,
'Crypto.Cipher.CAST': __CRYPTO_CIPHER_ALLOWED_MODULES,
'Crypto.Cipher.DES': __CRYPTO_CIPHER_ALLOWED_MODULES,
'Crypto.Cipher.DES3': __CRYPTO_CIPHER_ALLOWED_MODULES,
'gc': [
'enable',
'disable',
'isenabled',
'collect',
'get_debug',
'set_threshold',
'get_threshold',
'get_count'
],
'os': [
'access',
'altsep',
'curdir',
'defpath',
'devnull',
'environ',
'error',
'extsep',
'EX_NOHOST',
'EX_NOINPUT',
'EX_NOPERM',
'EX_NOUSER',
'EX_OK',
'EX_OSERR',
'EX_OSFILE',
'EX_PROTOCOL',
'EX_SOFTWARE',
'EX_TEMPFAIL',
'EX_UNAVAILABLE',
'EX_USAGE',
'F_OK',
'getcwd',
'getcwdu',
'getenv',
'listdir',
'lstat',
'name',
'NGROUPS_MAX',
'O_APPEND',
'O_CREAT',
'O_DIRECT',
'O_DIRECTORY',
'O_DSYNC',
'O_EXCL',
'O_LARGEFILE',
'O_NDELAY',
'O_NOCTTY',
'O_NOFOLLOW',
'O_NONBLOCK',
'O_RDONLY',
'O_RDWR',
'O_RSYNC',
'O_SYNC',
'O_TRUNC',
'O_WRONLY',
'open',
'pardir',
'path',
'pathsep',
'R_OK',
'readlink',
'remove',
'rename',
'SEEK_CUR',
'SEEK_END',
'SEEK_SET',
'sep',
'stat',
'stat_float_times',
'stat_result',
'strerror',
'TMP_MAX',
'unlink',
'urandom',
'utime',
'walk',
'WCOREDUMP',
'WEXITSTATUS',
'WIFEXITED',
'WIFSIGNALED',
'WIFSTOPPED',
'WNOHANG',
'WSTOPSIG',
'WTERMSIG',
'WUNTRACED',
'W_OK',
'X_OK',
],
'signal': [
],
'socket': [
'_GLOBAL_DEFAULT_TIMEOUT',
'AF_INET',
'SOCK_STREAM',
'SOCK_DGRAM',
'error',
'gaierror',
'herror',
'timeout',
'ssl',
'_fileobject',
],
'select': [
],
'ssl': [
],
}
_MODULE_OVERRIDES = {
'locale': {
'setlocale': FakeSetLocale,
},
'os': {
'access': FakeAccess,
'listdir': RestrictedPathFunction(os.listdir),
'lstat': RestrictedPathFunction(os.stat),
'open': FakeOpen,
'readlink': FakeReadlink,
'remove': FakeUnlink,
'rename': FakeRename,
'stat': RestrictedPathFunction(os.stat),
'uname': FakeUname,
'unlink': FakeUnlink,
'urandom': FakeURandom,
'utime': FakeUTime,
},
'signal': {
'__doc__': None,
},
'socket': {
'_fileobject': FakeFileObject,
'ssl': None,
},
'distutils.util': {
'get_platform': FakeGetPlatform,
},
}
_ENABLED_FILE_TYPES = (
imp.PKG_DIRECTORY,
imp.PY_SOURCE,
imp.PY_COMPILED,
imp.C_BUILTIN,
)
def __init__(self,
config,
module_dict,
app_code_path,
imp_module=imp,
os_module=os,
dummy_thread_module=dummy_thread,
pickle_module=pickle,
socket_module=socket,
select_module=select):
"""Initializer.
Args:
config: AppInfoExternal instance representing the parsed app.yaml file.
module_dict: Module dictionary to use for managing system modules.
Should be sys.modules.
app_code_path: The absolute path to the application code on disk.
imp_module, os_module, dummy_thread_module, etc.: References to
modules that exist in the dev_appserver that must be used by this class
in order to function, even if these modules have been unloaded from
sys.modules.
"""
self._config = config
self._module_dict = module_dict
self._imp = imp_module
self._os = os_module
self._dummy_thread = dummy_thread_module
self._pickle = pickle
self._socket = socket_module
self._socket.buffer = buffer
self._select = select_module
self._indent_level = 0
self._app_code_path = app_code_path
self._white_list_c_modules = list(self._WHITE_LIST_C_MODULES)
self._white_list_partial_modules = dict(self._WHITE_LIST_PARTIAL_MODULES)
self._enabled_modules = []
if self._config and self._config.runtime == 'python27':
self._white_list_c_modules.extend(self._PY27_ALLOWED_MODULES)
self._white_list_partial_modules['os'] = (
list(self._white_list_partial_modules['os']) + ['getpid', 'getuid'])
if self._config.libraries:
for libentry in self._config.libraries:
self._enabled_modules.append(libentry.name)
extra = self.__PY27_OPTIONAL_ALLOWED_MODULES.get(libentry.name)
logging.debug('Enabling %s: %r', libentry.name, extra)
if extra:
self._white_list_c_modules.extend(extra)
if libentry.name == 'django':
if 'django' not in self._module_dict:
version = libentry.version
if version == 'latest':
version = '1.2'
sitedir = os.path.join(SDK_ROOT,
'lib',
'django_%s' % version.replace('.', '_'))
if os.path.isdir(sitedir):
logging.debug('Enabling Django version %s at %s',
version, sitedir)
sys.path[:] = [dirname
for dirname in sys.path
if not dirname.startswith(os.path.join(
SDK_ROOT, 'lib', 'django'))]
sys.path.insert(1, sitedir)
else:
logging.warn('Enabling Django version %s (no directory found)',
version)
@Trace
def find_module(self, fullname, path=None):
"""See PEP 302."""
if fullname in ('cPickle', 'thread'):
return self
search_path = path
all_modules = fullname.split('.')
try:
for index, current_module in enumerate(all_modules):
current_module_fullname = '.'.join(all_modules[:index + 1])
if (current_module_fullname == fullname and not
self.StubModuleExists(fullname)):
self.FindModuleRestricted(current_module,
current_module_fullname,
search_path)
else:
if current_module_fullname in self._module_dict:
module = self._module_dict[current_module_fullname]
else:
module = self.FindAndLoadModule(current_module,
current_module_fullname,
search_path)
if hasattr(module, '__path__'):
search_path = module.__path__
except CouldNotFindModuleError:
return None
return self
def StubModuleExists(self, name):
"""Check if the named module has a stub replacement."""
if name in sys.builtin_module_names:
name = 'py_%s' % name
if name in dist.__all__:
return True
return False
def ImportStubModule(self, name):
"""Import the stub module replacement for the specified module."""
if name in sys.builtin_module_names:
name = 'py_%s' % name
module = __import__(dist.__name__, {}, {}, [name])
return getattr(module, name)
@Trace
def FixModule(self, module):
"""Prunes and overrides restricted module attributes.
Args:
module: The module to prune. This should be a new module whose attributes
reference back to the real module's __dict__ members.
"""
if module.__name__ in self._white_list_partial_modules:
allowed_symbols = self._white_list_partial_modules[module.__name__]
for symbol in set(module.__dict__) - set(allowed_symbols):
if not (symbol.startswith('__') and symbol.endswith('__')):
del module.__dict__[symbol]
if module.__name__ in self._MODULE_OVERRIDES:
module.__dict__.update(self._MODULE_OVERRIDES[module.__name__])
if module.__name__ == 'urllib' and NeedsMacOSXProxyFakes():
module.__dict__.update(
{'proxy_bypass_macosx_sysconf': FakeProxyBypassMacOSXSysconf,
'getproxies_macosx_sysconf': FakeGetProxiesMacOSXSysconf})
@Trace
def FindModuleRestricted(self,
submodule,
submodule_fullname,
search_path):
"""Locates a module while enforcing module import restrictions.
Args:
submodule: The short name of the submodule (i.e., the last section of
the fullname; for 'foo.bar' this would be 'bar').
submodule_fullname: The fully qualified name of the module to find (e.g.,
'foo.bar').
search_path: List of paths to search for to find this module. Should be
None if the current sys.path should be used.
Returns:
Tuple (source_file, pathname, description) where:
source_file: File-like object that contains the module; in the case
of packages, this will be None, which implies to look at __init__.py.
pathname: String containing the full path of the module on disk.
description: Tuple returned by imp.find_module().
However, in the case of an import using a path hook (e.g. a zipfile),
source_file will be a PEP-302-style loader object, pathname will be None,
and description will be a tuple filled with None values.
Raises:
ImportError exception if the requested module was found, but importing
it is disallowed.
CouldNotFindModuleError exception if the request module could not even
be found for import.
"""
if search_path is None:
search_path = [None] + sys.path
module_import_ok = False
if self._config and self._config.runtime == 'python27':
topmodule = submodule_fullname.split('.')[0]
if topmodule in self.__PY27_OPTIONAL_ALLOWED_MODULES:
if topmodule in self._enabled_modules:
module_import_ok = True
else:
msg = ('Third party package %s must be included in the '
'"libraries:" clause of your app.yaml file '
'in order to be imported.' % topmodule)
logging.error(msg)
raise ImportError(msg)
import_error = None
for path_entry in search_path:
result = self.FindPathHook(submodule, submodule_fullname, path_entry)
if result is not None:
source_file, pathname, description = result
if description == (None, None, None):
return result
suffix, mode, file_type = description
try:
if (file_type not in (self._imp.C_BUILTIN, self._imp.C_EXTENSION) and
not module_import_ok and
not FakeFile.IsFileAccessible(pathname)):
error_message = 'Access to module file denied: %s' % pathname
logging.debug(error_message)
raise ImportError(error_message)
if (file_type not in self._ENABLED_FILE_TYPES and
submodule not in self._white_list_c_modules):
error_message = ('Could not import "%s": Disallowed C-extension '
'or built-in module' % submodule_fullname)
logging.debug(error_message)
raise ImportError(error_message)
return source_file, pathname, description
except ImportError, e:
import_error = e
if import_error:
raise import_error
self.log('Could not find module "%s"', submodule_fullname)
raise CouldNotFindModuleError()
def FindPathHook(self, submodule, submodule_fullname, path_entry):
"""Helper for FindModuleRestricted to find a module in a sys.path entry.
Args:
submodule:
submodule_fullname:
path_entry: A single sys.path entry, or None representing the builtins.
Returns:
Either None (if nothing was found), or a triple (source_file, path_name,
description). See the doc string for FindModuleRestricted() for the
meaning of the latter.
"""
if path_entry is None:
if submodule_fullname in sys.builtin_module_names:
try:
result = self._imp.find_module(submodule)
except ImportError:
pass
else:
source_file, pathname, description = result
suffix, mode, file_type = description
if file_type == self._imp.C_BUILTIN:
return result
return None
if path_entry in sys.path_importer_cache:
importer = sys.path_importer_cache[path_entry]
else:
importer = None
for hook in sys.path_hooks:
try:
importer = hook(path_entry)
break
except ImportError:
pass
sys.path_importer_cache[path_entry] = importer
if importer is None:
try:
return self._imp.find_module(submodule, [path_entry])
except ImportError:
pass
else:
loader = importer.find_module(submodule)
if loader is not None:
return (loader, None, (None, None, None))
return None
@Trace
def LoadModuleRestricted(self,
submodule_fullname,
source_file,
pathname,
description):
"""Loads a module while enforcing module import restrictions.
As a byproduct, the new module will be added to the module dictionary.
Args:
submodule_fullname: The fully qualified name of the module to find (e.g.,
'foo.bar').
source_file: File-like object that contains the module's source code,
or a PEP-302-style loader object.
pathname: String containing the full path of the module on disk.
description: Tuple returned by imp.find_module(), or (None, None, None)
in case source_file is a PEP-302-style loader object.
Returns:
The new module.
Raises:
ImportError exception of the specified module could not be loaded for
whatever reason.
"""
if description == (None, None, None):
return source_file.load_module(submodule_fullname)
try:
try:
return self._imp.load_module(submodule_fullname,
source_file,
pathname,
description)
except:
if submodule_fullname in self._module_dict:
del self._module_dict[submodule_fullname]
raise
finally:
if source_file is not None:
source_file.close()
@Trace
def FindAndLoadModule(self,
submodule,
submodule_fullname,
search_path):
"""Finds and loads a module, loads it, and adds it to the module dictionary.
Args:
submodule: Name of the module to import (e.g., baz).
submodule_fullname: Full name of the module to import (e.g., foo.bar.baz).
search_path: Path to use for searching for this submodule. For top-level
modules this should be None; otherwise it should be the __path__
attribute from the parent package.
Returns:
A new module instance that has been inserted into the module dictionary
supplied to __init__.
Raises:
ImportError exception if the module could not be loaded for whatever
reason (e.g., missing, not allowed).
"""
module = self._imp.new_module(submodule_fullname)
if submodule_fullname == 'thread':
module.__dict__.update(self._dummy_thread.__dict__)
module.__name__ = 'thread'
elif submodule_fullname == 'cPickle':
module.__dict__.update(self._pickle.__dict__)
module.__name__ = 'cPickle'
elif submodule_fullname == 'os':
module.__dict__.update(self._os.__dict__)
elif submodule_fullname == 'socket':
module.__dict__.update(self._socket.__dict__)
elif submodule_fullname == 'select':
module.__dict__.update(self._select.__dict__)
elif submodule_fullname == 'ssl':
pass
elif self.StubModuleExists(submodule_fullname):
module = self.ImportStubModule(submodule_fullname)
else:
source_file, pathname, description = self.FindModuleRestricted(submodule, submodule_fullname, search_path)
module = self.LoadModuleRestricted(submodule_fullname,
source_file,
pathname,
description)
if (getattr(module, '__path__', None) is not None and
search_path != self._app_code_path):
try:
app_search_path = os.path.join(self._app_code_path,
*(submodule_fullname.split('.')[:-1]))
source_file, pathname, description = self.FindModuleRestricted(submodule,
submodule_fullname,
[app_search_path])
module.__path__.append(pathname)
except ImportError, e:
pass
module.__loader__ = self
self.FixModule(module)
if submodule_fullname not in self._module_dict:
self._module_dict[submodule_fullname] = module
if submodule_fullname == 'os':
os_path_name = module.path.__name__
os_path = self.FindAndLoadModule(os_path_name, os_path_name, search_path)
self._module_dict['os.path'] = os_path
module.__dict__['path'] = os_path
return module
@Trace
def GetParentPackage(self, fullname):
"""Retrieves the parent package of a fully qualified module name.
Args:
fullname: Full name of the module whose parent should be retrieved (e.g.,
foo.bar).
Returns:
Module instance for the parent or None if there is no parent module.
Raise:
ImportError exception if the module's parent could not be found.
"""
all_modules = fullname.split('.')
parent_module_fullname = '.'.join(all_modules[:-1])
if parent_module_fullname:
if self.find_module(fullname) is None:
raise ImportError('Could not find module %s' % fullname)
return self._module_dict[parent_module_fullname]
return None
@Trace
def GetParentSearchPath(self, fullname):
"""Determines the search path of a module's parent package.
Args:
fullname: Full name of the module to look up (e.g., foo.bar).
Returns:
Tuple (submodule, search_path) where:
submodule: The last portion of the module name from fullname (e.g.,
if fullname is foo.bar, then this is bar).
search_path: List of paths that belong to the parent package's search
path or None if there is no parent package.
Raises:
ImportError exception if the module or its parent could not be found.
"""
submodule = GetSubmoduleName(fullname)
parent_package = self.GetParentPackage(fullname)
search_path = None
if parent_package is not None and hasattr(parent_package, '__path__'):
search_path = parent_package.__path__
return submodule, search_path
@Trace
def GetModuleInfo(self, fullname):
"""Determines the path on disk and the search path of a module or package.
Args:
fullname: Full name of the module to look up (e.g., foo.bar).
Returns:
Tuple (pathname, search_path, submodule) where:
pathname: String containing the full path of the module on disk,
or None if the module wasn't loaded from disk (e.g. from a zipfile).
search_path: List of paths that belong to the found package's search
path or None if found module is not a package.
submodule: The relative name of the submodule that's being imported.
"""
submodule, search_path = self.GetParentSearchPath(fullname)
source_file, pathname, description = self.FindModuleRestricted(submodule, fullname, search_path)
suffix, mode, file_type = description
module_search_path = None
if file_type == self._imp.PKG_DIRECTORY:
module_search_path = [pathname]
pathname = os.path.join(pathname, '__init__%spy' % os.extsep)
return pathname, module_search_path, submodule
@Trace
def load_module(self, fullname):
"""See PEP 302."""
all_modules = fullname.split('.')
submodule = all_modules[-1]
parent_module_fullname = '.'.join(all_modules[:-1])
search_path = None
if parent_module_fullname and parent_module_fullname in self._module_dict:
parent_module = self._module_dict[parent_module_fullname]
if hasattr(parent_module, '__path__'):
search_path = parent_module.__path__
return self.FindAndLoadModule(submodule, fullname, search_path)
@Trace
def is_package(self, fullname):
"""See PEP 302 extensions."""
submodule, search_path = self.GetParentSearchPath(fullname)
source_file, pathname, description = self.FindModuleRestricted(submodule, fullname, search_path)
suffix, mode, file_type = description
if file_type == self._imp.PKG_DIRECTORY:
return True
return False
@Trace
def get_source(self, fullname):
"""See PEP 302 extensions."""
full_path, search_path, submodule = self.GetModuleInfo(fullname)
if full_path is None:
return None
source_file = open(full_path)
try:
return source_file.read()
finally:
source_file.close()
@Trace
def get_code(self, fullname):
"""See PEP 302 extensions."""
full_path, search_path, submodule = self.GetModuleInfo(fullname)
if full_path is None:
return None
source_file = open(full_path)
try:
source_code = source_file.read()
finally:
source_file.close()
source_code = source_code.replace('\r\n', '\n')
if not source_code.endswith('\n'):
source_code += '\n'
encoding = DEFAULT_ENCODING
for line in source_code.split('\n', 2)[:2]:
matches = CODING_COOKIE_RE.findall(line)
if matches:
encoding = matches[0].lower()
source_code.decode(encoding)
return compile(source_code, full_path, 'exec')
| bsd-2-clause | -7,049,346,292,556,689,000 | 24.107123 | 112 | 0.601553 | false |
victorywang80/Maintenance | saltstack/src/salt/tops/ext_nodes.py | 1 | 1750 | # -*- coding: utf-8 -*-
'''
External Nodes Classifier
=========================
The External Nodes Classifier is a master tops subsystem used to hook into
systems used to provide mapping information used by major configuration
management systems. One of the most common external nodes classification
system is provided by Cobbler and is called ``cobbler-ext-nodes``.
The cobbler-ext-nodes command can be used with this configuration:
.. code-block:: yaml
master_tops:
ext_nodes: cobbler-ext-nodes
It is noteworthy that the Salt system does not directly ingest the data
sent from the ``cobbler-ext-nodes`` command, but converts the data into
information that is used by a Salt top file.
'''
# Import python libs
import subprocess
# Import third party libs
import yaml
def __virtual__():
'''
Only run if properly configured
'''
if __opts__['master_tops'].get('ext_nodes'):
return 'ext_nodes'
return False
def top(**kwargs):
'''
Run the command configured
'''
if not 'id' in kwargs['opts']:
return {}
cmd = '{0} {1}'.format(
__opts__['master_tops']['ext_nodes'],
kwargs['opts']['id']
)
ndata = yaml.safe_load(
subprocess.Popen(
cmd,
shell=True,
stdout=subprocess.PIPE
).communicate()[0])
ret = {}
if 'environment' in ndata:
env = ndata['environment']
else:
env = 'base'
if 'classes' in ndata:
if isinstance(ndata['classes'], dict):
ret[env] = list(ndata['classes'])
elif isinstance(ndata['classes'], list):
ret[env] = ndata['classes']
else:
return ret
return ret
| apache-2.0 | -952,423,394,170,368,900 | 24.735294 | 74 | 0.594857 | false |
MissCatLady/AlarmEZ | venv/lib/python2.7/site-packages/passlib/handlers/cisco.py | 19 | 8517 | """passlib.handlers.cisco - Cisco password hashes"""
#=============================================================================
# imports
#=============================================================================
# core
from binascii import hexlify, unhexlify
from hashlib import md5
import logging; log = logging.getLogger(__name__)
from warnings import warn
# site
# pkg
from passlib.utils import h64, right_pad_string, to_unicode
from passlib.utils.compat import b, bascii_to_str, bytes, unicode, u, join_byte_values, \
join_byte_elems, byte_elem_value, iter_byte_values, uascii_to_str, str_to_uascii
import passlib.utils.handlers as uh
# local
__all__ = [
"cisco_pix",
"cisco_type7",
]
#=============================================================================
# cisco pix firewall hash
#=============================================================================
class cisco_pix(uh.HasUserContext, uh.StaticHandler):
"""This class implements the password hash used by Cisco PIX firewalls,
and follows the :ref:`password-hash-api`.
It does a single round of hashing, and relies on the username
as the salt.
The :meth:`~passlib.ifc.PasswordHash.encrypt`, :meth:`~passlib.ifc.PasswordHash.genhash`, and :meth:`~passlib.ifc.PasswordHash.verify` methods
have the following extra keyword:
:type user: str
:param user:
String containing name of user account this password is associated with.
This is *required* in order to correctly hash passwords associated
with a user account on the Cisco device, as it is used to salt
the hash.
Conversely, this *must* be omitted or set to ``""`` in order to correctly
hash passwords which don't have an associated user account
(such as the "enable" password).
"""
#===================================================================
# class attrs
#===================================================================
name = "cisco_pix"
checksum_size = 16
checksum_chars = uh.HASH64_CHARS
#===================================================================
# methods
#===================================================================
def _calc_checksum(self, secret):
if isinstance(secret, unicode):
# XXX: no idea what unicode policy is, but all examples are
# 7-bit ascii compatible, so using UTF-8
secret = secret.encode("utf-8")
user = self.user
if user:
# not positive about this, but it looks like per-user
# accounts use the first 4 chars of the username as the salt,
# whereas global "enable" passwords don't have any salt at all.
if isinstance(user, unicode):
user = user.encode("utf-8")
secret += user[:4]
# null-pad or truncate to 16 bytes
secret = right_pad_string(secret, 16)
# md5 digest
hash = md5(secret).digest()
# drop every 4th byte
hash = join_byte_elems(c for i,c in enumerate(hash) if i & 3 < 3)
# encode using Hash64
return h64.encode_bytes(hash).decode("ascii")
#===================================================================
# eoc
#===================================================================
#=============================================================================
# type 7
#=============================================================================
class cisco_type7(uh.GenericHandler):
"""This class implements the Type 7 password encoding used by Cisco IOS,
and follows the :ref:`password-hash-api`.
It has a simple 4-5 bit salt, but is nonetheless a reversible encoding
instead of a real hash.
The :meth:`~passlib.ifc.PasswordHash.encrypt` and :meth:`~passlib.ifc.PasswordHash.genhash` methods
have the following optional keywords:
:type salt: int
:param salt:
This may be an optional salt integer drawn from ``range(0,16)``.
If omitted, one will be chosen at random.
:type relaxed: bool
:param relaxed:
By default, providing an invalid value for one of the other
keywords will result in a :exc:`ValueError`. If ``relaxed=True``,
and the error can be corrected, a :exc:`~passlib.exc.PasslibHashWarning`
will be issued instead. Correctable errors include
``salt`` values that are out of range.
Note that while this class outputs digests in upper-case hexidecimal,
it will accept lower-case as well.
This class also provides the following additional method:
.. automethod:: decode
"""
#===================================================================
# class attrs
#===================================================================
name = "cisco_type7"
setting_kwds = ("salt",)
checksum_chars = uh.UPPER_HEX_CHARS
# NOTE: encoding could handle max_salt_value=99, but since key is only 52
# chars in size, not sure what appropriate behavior is for that edge case.
min_salt_value = 0
max_salt_value = 52
#===================================================================
# methods
#===================================================================
@classmethod
def genconfig(cls):
return None
@classmethod
def genhash(cls, secret, config):
# special case to handle ``config=None`` in same style as StaticHandler
if config is None:
return cls.encrypt(secret)
else:
return super(cisco_type7, cls).genhash(secret, config)
@classmethod
def from_string(cls, hash):
hash = to_unicode(hash, "ascii", "hash")
if len(hash) < 2:
raise uh.exc.InvalidHashError(cls)
salt = int(hash[:2]) # may throw ValueError
return cls(salt=salt, checksum=hash[2:].upper())
def __init__(self, salt=None, **kwds):
super(cisco_type7, self).__init__(**kwds)
self.salt = self._norm_salt(salt)
def _norm_salt(self, salt):
"the salt for this algorithm is an integer 0-52, not a string"
# XXX: not entirely sure that values >15 are valid, so for
# compatibility we don't output those values, but we do accept them.
if salt is None:
if self.use_defaults:
salt = self._generate_salt()
else:
raise TypeError("no salt specified")
if not isinstance(salt, int):
raise uh.exc.ExpectedTypeError(salt, "integer", "salt")
if salt < 0 or salt > self.max_salt_value:
msg = "salt/offset must be in 0..52 range"
if self.relaxed:
warn(msg, uh.PasslibHashWarning)
salt = 0 if salt < 0 else self.max_salt_value
else:
raise ValueError(msg)
return salt
def _generate_salt(self):
return uh.rng.randint(0, 15)
def to_string(self):
return "%02d%s" % (self.salt, uascii_to_str(self.checksum))
def _calc_checksum(self, secret):
# XXX: no idea what unicode policy is, but all examples are
# 7-bit ascii compatible, so using UTF-8
if isinstance(secret, unicode):
secret = secret.encode("utf-8")
return hexlify(self._cipher(secret, self.salt)).decode("ascii").upper()
@classmethod
def decode(cls, hash, encoding="utf-8"):
"""decode hash, returning original password.
:arg hash: encoded password
:param encoding: optional encoding to use (defaults to ``UTF-8``).
:returns: password as unicode
"""
self = cls.from_string(hash)
tmp = unhexlify(self.checksum.encode("ascii"))
raw = self._cipher(tmp, self.salt)
return raw.decode(encoding) if encoding else raw
# type7 uses a xor-based vingere variant, using the following secret key:
_key = u("dsfd;kfoA,.iyewrkldJKDHSUBsgvca69834ncxv9873254k;fg87")
@classmethod
def _cipher(cls, data, salt):
"xor static key against data - encrypts & decrypts"
key = cls._key
key_size = len(key)
return join_byte_values(
value ^ ord(key[(salt + idx) % key_size])
for idx, value in enumerate(iter_byte_values(data))
)
#=============================================================================
# eof
#=============================================================================
| mit | -5,510,771,039,623,949,000 | 37.890411 | 146 | 0.527885 | false |
apple/swift | utils/gyb_syntax_support/PatternNodes.py | 13 | 3650 | from .Child import Child
from .Node import Node # noqa: I201
PATTERN_NODES = [
# type-annotation -> ':' type
Node('TypeAnnotation', kind='Syntax',
children=[
Child('Colon', kind='ColonToken'),
Child('Type', kind='Type'),
]),
# enum-case-pattern -> type-identifier? '.' identifier tuple-pattern?
Node('EnumCasePattern', kind='Pattern',
children=[
Child('Type', kind='Type',
is_optional=True),
Child('Period', kind='PeriodToken'),
Child('CaseName', kind='IdentifierToken'),
Child('AssociatedTuple', kind='TuplePattern',
is_optional=True),
]),
# is-type-pattern -> 'is' type
Node('IsTypePattern', kind='Pattern',
children=[
Child('IsKeyword', kind='IsToken'),
Child('Type', kind='Type'),
]),
# optional-pattern -> pattern '?'
Node('OptionalPattern', kind='Pattern',
children=[
Child('SubPattern', kind='Pattern'),
Child('QuestionMark', kind='PostfixQuestionMarkToken'),
]),
# identifier-pattern -> identifier
Node('IdentifierPattern', kind='Pattern',
children=[
Child('Identifier', kind='Token',
token_choices=[
'SelfToken',
'IdentifierToken',
]),
]),
# as-pattern -> pattern 'as' type
Node('AsTypePattern', kind='Pattern',
children=[
Child('Pattern', kind='Pattern'),
Child('AsKeyword', kind='AsToken'),
Child('Type', kind='Type'),
]),
# tuple-pattern -> '(' tuple-pattern-element-list ')'
Node('TuplePattern', kind='Pattern',
traits=['Parenthesized'],
children=[
Child('LeftParen', kind='LeftParenToken'),
Child('Elements', kind='TuplePatternElementList',
collection_element_name='Element'),
Child('RightParen', kind='RightParenToken'),
]),
# wildcard-pattern -> '_' type-annotation?
Node('WildcardPattern', kind='Pattern',
children=[
Child('Wildcard', kind='WildcardToken'),
Child('TypeAnnotation', kind='TypeAnnotation',
is_optional=True),
]),
# tuple-pattern-element -> identifier? ':' pattern ','?
Node('TuplePatternElement', kind='Syntax',
traits=['WithTrailingComma', 'Labeled'],
children=[
Child('LabelName', kind='IdentifierToken',
is_optional=True),
Child('LabelColon', kind='ColonToken',
is_optional=True),
Child('Pattern', kind='Pattern'),
Child('TrailingComma', kind='CommaToken',
is_optional=True),
]),
# expr-pattern -> expr
Node('ExpressionPattern', kind='Pattern',
children=[
Child('Expression', kind='Expr'),
]),
# tuple-pattern-element-list -> tuple-pattern-element
# tuple-pattern-element-list?
Node('TuplePatternElementList', kind='SyntaxCollection',
element='TuplePatternElement'),
# value-binding-pattern -> 'let' pattern
# | 'var' pattern
Node('ValueBindingPattern', kind='Pattern',
children=[
Child('LetOrVarKeyword', kind='Token',
token_choices=[
'LetToken',
'VarToken',
]),
Child('ValuePattern', kind='Pattern'),
]),
]
| apache-2.0 | -5,359,074,479,707,960,000 | 32.486239 | 73 | 0.514521 | false |
will-moore/openmicroscopy | components/tools/OmeroPy/src/omero/gateway/scripts/dbhelpers.py | 9 | 19384 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import sys
sys.path.append('.')
import omero.gateway
import omero.model
import os
import subprocess
import urllib2
from types import StringTypes
from path import path
BASEPATH = os.path.dirname(os.path.abspath(__file__))
TESTIMG_URL = 'http://downloads.openmicroscopy.org/images/gateway_tests/'
DEFAULT_GROUP_PERMS = 'rwr---'
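# Permission strings such as 'rwr---' follow the OMERO convention (assumed
# here): the three character pairs describe owner, group and world access,
# so 'rwr---' means owner read/write, group read-only, no public access.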
if not omero.gateway.BlitzGateway.ICE_CONFIG:
try:
import settings
iceconfig = os.environ.get('ICE_CONFIG', None)
if iceconfig is None:
iceconfig = os.path.join(settings.OMERO_HOME, 'etc', 'ice.config')
omero.gateway.BlitzGateway.ICE_CONFIG = iceconfig
except ImportError:
pass
except AttributeError:
pass
# Gateway = omero.gateway.BlitzGateway
def refreshConfig():
    """Refresh ROOT credentials from the server's Ice properties."""
bg = omero.gateway.BlitzGateway()
try:
ru = bg.c.ic.getProperties().getProperty('omero.rootuser')
rp = bg.c.ic.getProperties().getProperty('omero.rootpass')
finally:
bg.seppuku()
if ru:
ROOT.name = ru
if rp:
ROOT.passwd = rp
def loginAsRoot():
refreshConfig()
return login(ROOT)
def loginAsPublic():
return login(settings.PUBLIC_USER, settings.PUBLIC_PASSWORD)
def login(alias, pw=None, groupname=None):
    """Log in as a UserEntry, a USERS alias, or explicit credentials."""
if isinstance(alias, UserEntry):
return alias.login(groupname=groupname)
elif pw is None:
return USERS[alias].login(groupname=groupname)
else:
return UserEntry(alias, pw).login(groupname=groupname)
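# Illustrative use of the login helpers above -- a sketch only. It assumes a
# reachable server (via ICE_CONFIG) and, for the alias form, an entry named
# 'user1' in the USERS registry defined later in this module (hypothetical):
#
#     conn = loginAsRoot()
#     try:
#         admin = conn.getAdminService()
#         # ... administrative calls ...
#     finally:
#         conn.seppuku()
#
#     conn = login('user1')                       # looked up in USERS
#     conn = login('someuser', 'somepass')        # explicit credentials
#     conn = login('someuser', 'somepass', groupname='somegroup')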
class BadGroupPermissionsException(Exception):
pass
class UserEntry (object):
    """Describes a test user/group that can be created and logged in."""
def __init__(self, name, passwd, firstname='', middlename='', lastname='',
email='', ldap=False, groupname=None, groupperms=None,
groupowner=False, admin=False):
"""
If no groupperms are passed, then check_group_perms will do nothing.
The default perms for newly created groups is defined
in _getOrCreateGroup
"""
self.name = name
self.passwd = passwd
self.firstname = firstname
self.middlename = middlename
self.lastname = lastname
self.ldap = ldap
self.email = email
self.admin = admin
self.groupname = groupname
self.groupperms = groupperms
self.groupowner = groupowner
def fullname(self):
return '%s %s' % (self.firstname, self.lastname)
def login(self, groupname=None):
if groupname is None:
groupname = self.groupname
client = omero.gateway.BlitzGateway(
self.name, self.passwd, group=groupname, try_super=self.admin)
if not client.connect():
print "Can not connect"
return None
a = client.getAdminService()
if groupname is not None:
if client.getEventContext().groupName != groupname:
try:
g = a.lookupGroup(groupname)
client.setGroupForSession(g.getId().val)
except:
pass
# Reset group name and evaluate
self.groupname = a.getEventContext().groupName
if self.groupname != "system":
UserEntry.check_group_perms(
client, self.groupname, self.groupperms)
return client
@staticmethod
def check_group_perms(client, group, groupperms):
"""
If expected permissions have been set, then this will
        enforce equality. If groupperms is None, then
nothing will be checked.
"""
if groupperms is not None:
if isinstance(group, StringTypes):
a = client.getAdminService()
g = a.lookupGroup(group)
else:
g = group
p = g.getDetails().getPermissions()
if str(p) != groupperms:
raise BadGroupPermissionsException(
"%s group has wrong permissions! Expected: %s Found: %s" %
(g.getName(), groupperms, p))
@staticmethod
def assert_group_perms(client, group, groupperms):
"""
If expected permissions have been set, then this will
change group permissions to those requested if not
        already equal. If groupperms is None, then
nothing will be checked.
"""
a = client.getAdminService()
try:
if isinstance(group, StringTypes):
g = a.lookupGroup(group)
else:
g = group
UserEntry.check_group_perms(client, g, groupperms)
except BadGroupPermissionsException:
client._waitOnCmd(client.chmodGroup(g.id.val, groupperms))
@staticmethod
def _getOrCreateGroup(client, groupname, ldap=False, groupperms=None):
# Default on class is None
if groupperms is None:
groupperms = DEFAULT_GROUP_PERMS
a = client.getAdminService()
try:
g = a.lookupGroup(groupname)
except:
g = omero.model.ExperimenterGroupI()
g.setName(omero.gateway.omero_type(groupname))
g.setLdap(omero.gateway.omero_type(ldap))
p = omero.model.PermissionsI(groupperms)
g.details.setPermissions(p)
a.createGroup(g)
g = a.lookupGroup(groupname)
UserEntry.check_group_perms(client, groupname, groupperms)
return g
def create(self, client, password):
a = client.getAdminService()
try:
a.lookupExperimenter(self.name)
# print "Already exists: %s" % self.name
return False
except:
# print "Creating: %s" % self.name
pass
if self.groupname is None:
self.groupname = self.name + '_group'
g = UserEntry._getOrCreateGroup(
client, self.groupname, groupperms=self.groupperms)
u = omero.model.ExperimenterI()
u.setOmeName(omero.gateway.omero_type(self.name))
u.setFirstName(omero.gateway.omero_type(self.firstname))
u.setMiddleName(omero.gateway.omero_type(self.middlename))
u.setLastName(omero.gateway.omero_type(self.lastname))
u.setLdap(omero.gateway.omero_type(self.ldap))
u.setEmail(omero.gateway.omero_type(self.email))
a.createUser(u, g.getName().val)
u = a.lookupExperimenter(self.name)
if self.admin:
a.addGroups(u, (a.lookupGroup("system"),))
client.c.sf.setSecurityPassword(password) # See #3202
a.changeUserPassword(
u.getOmeName().val, omero.gateway.omero_type(self.passwd))
if self.groupowner:
a.setGroupOwner(g, u)
return True
def changePassword(self, client, password, rootpass):
a = client.getAdminService()
client.c.sf.setSecurityPassword(rootpass) # See #3202
a.changeUserPassword(self.name, omero.gateway.omero_type(password))
@staticmethod
def addGroupToUser(client, groupname, groupperms=None):
if groupperms is None:
groupperms = DEFAULT_GROUP_PERMS
a = client.getAdminService()
admin_gateway = None
try:
if 'system' not in [x.name.val for x in a.containedGroups(
client.getUserId())]:
admin_gateway = loginAsRoot()
a = admin_gateway.getAdminService()
g = UserEntry._getOrCreateGroup(
client, groupname, groupperms=groupperms)
a.addGroups(a.getExperimenter(client.getUserId()), (g,))
finally:
# Always clean up the results of login
if admin_gateway:
admin_gateway.seppuku()
@staticmethod
def setGroupForSession(client, groupname, groupperms=None):
if groupperms is None:
groupperms = DEFAULT_GROUP_PERMS
a = client.getAdminService()
if groupname not in [x.name.val for x in a.containedGroups(
client.getUserId())]:
UserEntry.addGroupToUser(client, groupname, groupperms)
# Must reconnect to read new groupexperimentermap
t = client.clone()
client.c.closeSession()
client._proxies = omero.gateway.NoProxies()
client._ctx = None
client.c = t.c
client.connect()
a = client.getAdminService()
g = a.lookupGroup(groupname)
client.setGroupForSession(g.getId().val)
return client
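# Typical UserEntry usage -- a sketch with hypothetical names, not entries
# that necessarily exist in this module's USERS dict:
#
#     entry = UserEntry('testuser', 'testpass', firstname='Test',
#                       lastname='User', groupname='testgroup',
#                       groupperms='rwr---')
#     root = loginAsRoot()
#     try:
#         entry.create(root, ROOT.passwd)  # creates group and user if absent
#     finally:
#         root.seppuku()
#     conn = entry.login()                 # BlitzGateway for the new user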
class ObjectEntry (object):
    """Base class for the lazily created test-object descriptors below."""
class ProjectEntry (ObjectEntry):
    """Describes a test Project, created on demand for its owner."""
def __init__(self, name, owner, create_group=False, group_perms=None):
self.name = name
self.owner = owner
self.create_group = create_group
self.group_perms = group_perms
def get(self, client=None, fromCreate=False):
if client is None:
client = USERS[self.owner].login()
for p in client.listProjects():
if p.getName() == self.name:
p.__loadedHotSwap__()
return p
return None
def create(self, client=None):
if client is None:
client = USERS[self.owner].login()
p = self.get(client)
if p is not None:
return p
p = omero.model.ProjectI(loaded=True)
p.setName(omero.gateway.omero_type(self.name))
p.setDescription(omero.gateway.omero_type(self.name))
        if self.create_group:
            if isinstance(self.create_group, StringTypes):
                groupname = self.create_group
            else:
                raise ValueError('group must be string')
            s = loginAsRoot()
UserEntry._getOrCreateGroup(
s, groupname, groupperms=self.group_perms)
try:
UserEntry.addGroupToUser(s, groupname, self.group_perms)
finally:
s.seppuku()
UserEntry.setGroupForSession(client, groupname, self.group_perms)
p = omero.gateway.ProjectWrapper(
client, client.getUpdateService().saveAndReturnObject(p))
return self.get(client, True)
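# ProjectEntry usage sketch (hypothetical key; real entries normally live in
# the PROJECTS dict defined towards the end of this module):
#
#     PROJECTS['testproj'] = ProjectEntry('testproj', 'testuser')
#     p = PROJECTS['testproj'].create()   # logs in as the owner and creates
#     p = PROJECTS['testproj'].get(conn)  # or fetch with an open connection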
class DatasetEntry (ObjectEntry):
    """Describes a test Dataset linked to a ProjectEntry or project."""
def __init__(self, name, project, description=None, callback=None):
self.name = name
self.project = project
self.description = description
self.callback = callback
def get(self, client, forceproj=None):
if forceproj is None:
if isinstance(self.project, StringTypes):
project = PROJECTS[self.project].get(client)
elif isinstance(self.project, ProjectEntry):
project = self.project.get(client)
else:
project = self.project
else:
project = forceproj
for d in project.listChildren():
if d.getName() == self.name and self.description_check(d):
d.__loadedHotSwap__()
return d
return None
def create(self):
if isinstance(self.project, StringTypes):
project = PROJECTS[self.project]
user = USERS[project.owner]
client = user.login()
project = project.get(client)
else:
project = self.project
client = project._conn
d = self.get(client, project)
if d is not None and self.description_check(d):
return d
d = omero.model.DatasetI(loaded=True)
d.setName(omero.gateway.omero_type(self.name))
if self.description is not None:
d.setDescription(omero.gateway.omero_type(self.description))
project.linkDataset(d)
project.save()
rv = self.get(client, project)
if self.callback:
self.callback(rv)
return rv
def description_check(self, d):
desc_match = (
omero.gateway.omero_type(d.getDescription()) ==
omero.gateway.omero_type(self.description))
desc_check = (
(self.description is None and d.getDescription() == '')
or (self.description is not None and desc_match))
return desc_check
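# DatasetEntry usage sketch (hypothetical keys; 'project' may be a PROJECTS
# key, a ProjectEntry instance or an already wrapped project object):
#
#     DATASETS['testds'] = DatasetEntry('testds', 'testproj',
#                                       description='created by dbhelpers')
#     d = DATASETS['testds'].create()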
class ImageEntry (ObjectEntry):
    """Describes a test Image, imported from a local or downloaded file."""
def __init__(self, name, filename, dataset, callback=None):
self.name = name
self.filename = filename # If False will create image without pixels
if self.name is None and filename:
self.name = os.path.basename(filename)
self.dataset = dataset
self.callback = callback
def get(self, client, forceds=None):
if forceds is None:
dataset = DATASETS[self.dataset].get(client)
else:
dataset = forceds
for i in dataset.listChildren():
if i.getName() == self.name:
return i
return None
def create(self):
if isinstance(self.dataset, StringTypes):
dataset = DATASETS[self.dataset]
project = PROJECTS[dataset.project]
client = USERS[project.owner].login()
dataset = dataset.get(client)
else:
dataset = self.dataset
client = dataset._conn
i = self.get(client, dataset)
if i is not None:
# print ".. -> image already exists: %s" % self.name
return i
# print ".. -> create new image: %s" % self.name
sys.stderr.write('I')
if self.filename is False:
UserEntry.setGroupForSession(
client, dataset.getDetails().getGroup().getName())
self._createWithoutPixels(client, dataset)
return self.get(client, dataset)
fpath = os.path.join(BASEPATH, self.filename)
if not os.path.exists(fpath):
if not os.path.exists(os.path.dirname(fpath)):
os.makedirs(os.path.dirname(fpath))
# First try to download the image
try:
# print "Trying to get test image from " + TESTIMG_URL +
# self.filename
sys.stderr.write('<')
f = urllib2.urlopen(TESTIMG_URL + self.filename)
open(fpath, 'wb').write(f.read())
except urllib2.HTTPError:
raise IOError('No such file %s' % fpath)
host = dataset._conn.c.ic.getProperties().getProperty(
'omero.host') or 'localhost'
port = dataset._conn.c.ic.getProperties().getProperty(
'omero.port') or '4063'
possiblepaths = (
# Running from dist
path(".") / ".." / "bin" / "omero",
# Running from OmeroPy
path(".") / ".." / ".." / ".." / "dist" / "bin" / "omero",
# Running from OmeroWeb
path(".") / ".." / ".." / ".." / "bin" / "omero",
# not found
"omero",
)
for exe in possiblepaths:
if exe.exists():
break
if exe == 'omero':
print "\n\nNo omero found!" \
"Add OMERO_HOME/bin to your PATH variable (See #5176)\n\n"
newconn = dataset._conn.clone()
newconn.connect()
try:
UserEntry.setGroupForSession(
newconn, dataset.getDetails().getGroup().getName())
session = newconn._sessionUuid
# print session
exe += ' -s %s -k %s -p %s import -d %i -n' % (
host, session, port, dataset.getId())
exe = exe.split() + [self.name, fpath]
print ' '.join(exe)
try:
p = subprocess.Popen(
exe, shell=False, stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
except OSError:
print "!!Please make sure the 'omero' executable is in PATH"
return None
# print ' '.join(exe)
# [0].strip() #re.search(
# 'Saving pixels id: (\d*)', p.communicate()[0]).group(1)
pid = p.communicate()
# print pid
try:
img = omero.gateway.ImageWrapper(
dataset._conn,
dataset._conn.getQueryService().find(
'Pixels', long(pid[0].split('\n')[0].strip())).image)
except ValueError:
print pid
raise
# print "imgid = %i" % img.getId()
img.setName(self.name)
# img._obj.objectiveSettings = None
img.save()
if self.callback:
self.callback(img)
return img
finally:
newconn.seppuku() # Always cleanup the return from clone/connect
def _createWithoutPixels(self, client, dataset):
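        # Build a pixel-less Image and link it to the dataset directly,
        # forcing the dataset's image links to appear loaded if necessary.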
img = omero.model.ImageI()
img.setName(omero.gateway.omero_type(self.name))
if not dataset.imageLinksLoaded:
print ".!."
dataset._obj._imageLinksSeq = []
dataset._obj._imageLinksLoaded = True
dataset.linkImage(img)
dataset.save()
def getProject(client, alias):
return PROJECTS[alias].get(client)
def assertCommentAnnotation(object, ns, value):
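    # Ensure the object carries a comment annotation with the given namespace
    # and value, creating and linking a new one when it is missing or differs.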
ann = object.getAnnotation(ns)
if ann is None or ann.getValue() != value:
ann = omero.gateway.CommentAnnotationWrapper()
ann.setNs(ns)
ann.setValue(value)
object.linkAnnotation(ann)
return ann
def getDataset(client, alias, forceproj=None):
return DATASETS[alias].get(client, forceproj)
def getImage(client, alias, forceds=None, autocreate=False):
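    # Look up an ImageEntry by alias, creating (importing) it first when
    # autocreate is True and it does not exist yet.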
rv = IMAGES[alias].get(client, forceds)
if rv is None and autocreate:
i = IMAGES[alias].create()
i._conn.seppuku()
rv = IMAGES[alias].get(client, forceds)
return rv
def bootstrap(onlyUsers=False, skipImages=True):
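    # Create every user declared in USERS (as root), then the PROJECTS and
    # DATASETS fixtures and, unless skipImages is True, the IMAGES fixtures.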
# Create users
client = loginAsRoot()
try:
for k, u in USERS.items():
if not u.create(client, ROOT.passwd):
u.changePassword(client, u.passwd, ROOT.passwd)
u.assert_group_perms(client, u.groupname, u.groupperms)
if onlyUsers:
return
for k, p in PROJECTS.items():
p = p.create()
p._conn.seppuku()
# print p.get(client).getDetails().getPermissions().isUserWrite()
for k, d in DATASETS.items():
d = d.create()
d._conn.seppuku()
if not skipImages:
for k, i in IMAGES.items():
i = i.create()
i._conn.seppuku()
finally:
client.seppuku()
def cleanup():
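    # Delete all fixture Projects (including children and annotations) and
    # reset the test users' passwords via the root session.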
for k, p in PROJECTS.items():
sys.stderr.write('*')
p = p.get()
if p is not None:
client = p._conn
handle = client.deleteObjects(
'Project', [p.getId()], deleteAnns=True, deleteChildren=True)
try:
client._waitOnCmd(handle)
finally:
handle.close()
client.seppuku()
client = loginAsRoot()
for k, u in USERS.items():
u.changePassword(client, None, ROOT.passwd)
client.seppuku()
ROOT = UserEntry('root', 'ome', admin=True)
USERS = {
# 'alias': UserEntry entry,
}
PROJECTS = {
# 'alias': ProjectEntry entry,
}
DATASETS = {
# 'alias': DatasetEntry entry,
}
IMAGES = {
# 'alias': ImageEntry entry,
}
| gpl-2.0 | 331,579,324,663,315,800 | 32.478411 | 78 | 0.56851 | false |
diana-hep/femtocode | lang/tests/test_syntax.py | 1 | 77475 | #!/usr/bin/env python
# Copyright 2016 DIANA-HEP
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import ast
import re
import sys
import unittest
from femtocode.asts.parsingtree import *
from femtocode.parser import parse
from femtocode.py23 import *
class TestSyntax(unittest.TestCase):
def runTest(self):
pass
def check(self, source, theirs=None, linenumbers=False):
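        # Parse `source` with the femtocode parser and compare the resulting
        # AST against Python's own ast.parse output (or against an explicitly
        # supplied expected tree), optionally checking line/column numbers too.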
if theirs is None:
theirs = ast.parse(source).body[0].value
mine = parse(source).expression
else:
mine = parse(source)
# verify that even the line numbers are the same
global same, treeOne, treeTwo
same = True
treeOne = ""
treeTwo = ""
def deepcompare(one, two, indent):
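            # Recursively walk both trees, rendering each into an indented
            # string (treeOne/treeTwo) and flagging `same = False` at the
            # first structural or positional mismatch.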
global same, treeOne, treeTwo
if isinstance(one, ast.AST):
if not (isinstance(two, ast.AST) and one._fields == two._fields and one.__class__ == two.__class__):
same = False
if linenumbers:
if not (getattr(one, "lineno", "?") == getattr(two, "lineno", "?") and getattr(one, "col_offset", "?") == getattr(two, "col_offset", "?")):
if hasattr(one, "lineno") and hasattr(one, "col_offset"):
# Python's lineno/col_offset for strings with line breaks is wrong.
# Don't count it against my implementation for getting it right.
if not isinstance(one, ast.Str) and not (isinstance(one, ast.Expr) and isinstance(one.value, ast.Str)):
same = False
if not (hasattr(two, "lineno") and hasattr(two, "col_offset")):
raise Exception
treeOne += one.__class__.__name__ + " " + str(getattr(one, "lineno", "?")) + ":" + str(getattr(one, "col_offset", "?")) + "\n"
treeTwo += two.__class__.__name__ + " " + str(getattr(two, "lineno", "?")) + ":" + str(getattr(two, "col_offset", "?")) + "\n"
if same:
for attrib in one._fields:
treeOne += indent + " " + attrib + ": "
treeTwo += indent + " " + attrib + ": "
valueOne = getattr(one, attrib)
valueTwo = getattr(two, attrib)
if isinstance(valueOne, list):
if not (isinstance(valueTwo, list) and len(valueOne) == len(valueTwo)):
same = False
if len(valueOne) == 0:
treeOne += "[]\n"
else:
treeOne += "\n"
if len(valueTwo) == 0:
treeTwo += "[]\n"
else:
treeTwo += "\n"
for x, y in zip(valueOne, valueTwo):
treeOne += indent + " - "
treeTwo += indent + " - "
deepcompare(x, y, indent + " ")
elif isinstance(valueOne, (ast.Load, ast.Store, ast.Param, ast.Del)):
if not (isinstance(valueTwo, (ast.Load, ast.Store, ast.Param, ast.Del))):
same = False
treeOne += valueOne.__class__.__name__ + "\n"
treeTwo += valueTwo.__class__.__name__ + "\n"
elif isinstance(valueOne, ast.AST):
if not (isinstance(valueTwo, ast.AST)):
same = False
deepcompare(valueOne, valueTwo, indent + " ")
elif valueOne is None or isinstance(valueOne, (int, long, float, complex) + string_types):
if not (valueOne == valueTwo):
same = False
treeOne += repr(valueOne) + "\n"
treeTwo += repr(valueTwo) + "\n"
else:
raise Exception
else:
if not (one == two):
same = False
deepcompare(theirs, mine, "")
if not same:
sys.stderr.write("Error in parsing: " + source + "\n\n")
treeOne = treeOne.split("\n")
treeTwo = treeTwo.split("\n")
width = max(len(x) for x in treeOne) + 3
x = "Expected"
y = "Parser output"
diff = x != re.sub("\s*\(.*\)", "", y)
while len(x) < width:
x += " "
sys.stderr.write(x + "| " + y + "\n")
x = "-" * len(x)
sys.stderr.write(x + "+-" + x + "\n")
while len(treeOne) < len(treeTwo):
treeOne.append("")
while len(treeTwo) < len(treeOne):
treeTwo.append("")
for x, y in zip(treeOne, treeTwo):
diff = x != re.sub("\s*\(.*\)", "", y)
while len(x) < width:
x += " "
if diff:
sys.stderr.write(x + "> " + y + "\n")
else:
sys.stderr.write(x + " " + y + "\n")
sys.exit(-1) # too much output to see all at once
def test_python(self):
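        # Expressions that are also valid Python must produce the same AST
        # as Python's own parser.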
self.check('"hello"')
self.check('"he\\nllo"')
self.check('"he\\\\nllo"')
self.check('"he\\"\\\\nllo"')
self.check('"he\'\\"\\\\nllo"')
self.check('"he\'\\"\\\\n\\a\\b\\f\\r\\t\\v\\123\\o123\\xf3llo"')
self.check('"he\'\\"\\\\n\\a\\b\\f\\r\\t\\v\\123\\o123\\xf3\\N{LATIN SMALL LETTER ETH}llo"')
self.check('"""hello"""')
self.check('"""he\\nllo"""')
self.check('"""he\\\\nllo"""')
self.check('"""he\\"\\\\nllo"""')
self.check('"""he\'\\"\\\\nllo"""')
self.check('"""he\'\\"\\\\n\\a\\b\\f\\r\\t\\v\\123\\o123\\xf3llo"""')
self.check('"""he\'\\"\\\\n\\a\\b\\f\\r\\t\\v\\123\\o123\\xf3\\N{LATIN SMALL LETTER ETH}llo"""')
self.check('"""he\'\\"\\\\n\\a\\b\\f\\r\\t\\v\\123\\o123\\xf3\nllo"""')
self.check('"""he\'\\"\\\\n\\a\\b\\f\\r\\t\\v\\123\\o123\\xf3\n"llo"""')
self.check("'hello'")
self.check("'he\\nllo'")
self.check("'he\\\\nllo'")
self.check("'he\\'\\\\nllo'")
self.check("'he\"\\'\\\\nllo'")
self.check("'he\"\\'\\\\n\\a\\b\\f\\r\\t\\v\\123\\o123\\xf3llo'")
self.check("'he\"\\'\\\\n\\a\\b\\f\\r\\t\\v\\123\\o123\\xf3\\N{LATIN SMALL LETTER ETH}llo'")
self.check("'''hello'''")
self.check("'''he\\nllo'''")
self.check("'''he\\\\nllo'''")
self.check("'''he\\'\\\\nllo'''")
self.check("'''he\"\\'\\\\nllo'''")
self.check("'''he\"\\'\\\\n\\a\\b\\f\\r\\t\\v\\123\\o123\\xf3llo'''")
self.check("'''he\"\\'\\\\n\\a\\b\\f\\r\\t\\v\\123\\o123\\xf3\\N{LATIN SMALL LETTER ETH}llo'''")
self.check("'''he\"\\'\\\\n\\a\\b\\f\\r\\t\\v\\123\\o123\\xf3\nllo'''")
self.check("'''he\"\\'\\\\n\\a\\b\\f\\r\\t\\v\\123\\o123\\xf3\n'llo'''")
self.check('''.3''')
self.check('''-3''')
self.check('''- 3''')
self.check('''- 3''')
self.check('''--3''')
self.check('''-- 3''')
self.check('''- -3''')
self.check('''- - 3''')
self.check('''- - 3''')
self.check('''+3''')
self.check('''+ 3''')
self.check('''+ 3''')
self.check('''++3''')
self.check('''++ 3''')
self.check('''+ +3''')
self.check('''+ + 3''')
self.check('''+ + 3''')
self.check('''+-3''')
self.check('''+- 3''')
self.check('''+ -3''')
self.check('''+ - 3''')
self.check('''+ - 3''')
self.check('''-+3''')
self.check('''-+ 3''')
self.check('''- +3''')
self.check('''- + 3''')
self.check('''- + 3''')
self.check('''-3.14''')
self.check('''- 3.14''')
self.check('''- 3.14''')
self.check('''--3.14''')
self.check('''-- 3.14''')
self.check('''- -3.14''')
self.check('''- - 3.14''')
self.check('''- - 3.14''')
self.check('''+3.14''')
self.check('''+ 3.14''')
self.check('''+ 3.14''')
self.check('''++3.14''')
self.check('''++ 3.14''')
self.check('''+ +3.14''')
self.check('''+ + 3.14''')
self.check('''+ + 3.14''')
self.check('''+-3.14''')
self.check('''+- 3.14''')
self.check('''+ -3.14''')
self.check('''+ - 3.14''')
self.check('''+ - 3.14''')
self.check('''-+3.14''')
self.check('''-+ 3.14''')
self.check('''- +3.14''')
self.check('''- + 3.14''')
self.check('''- + 3.14''')
self.check('''-3e1''')
self.check('''- 3e1''')
self.check('''- 3e1''')
self.check('''--3e1''')
self.check('''-- 3e1''')
self.check('''- -3e1''')
self.check('''- - 3e1''')
self.check('''- - 3e1''')
self.check('''+3e1''')
self.check('''+ 3e1''')
self.check('''+ 3e1''')
self.check('''++3e1''')
self.check('''++ 3e1''')
self.check('''+ +3e1''')
self.check('''+ + 3e1''')
self.check('''+ + 3e1''')
self.check('''+-3e1''')
self.check('''+- 3e1''')
self.check('''+ -3e1''')
self.check('''+ - 3e1''')
self.check('''+ - 3e1''')
self.check('''-+3e1''')
self.check('''-+ 3e1''')
self.check('''- +3e1''')
self.check('''- + 3e1''')
self.check('''- + 3e1''')
self.check('''[]''')
self.check('''[3]''')
self.check('''[3,]''')
self.check('''[3, 4]''')
self.check('''[3, 4,]''')
self.check('''[3, 4, 5]''')
self.check('''[3, 4, 5,]''')
self.check('''[3, 4, 5, 6]''')
self.check('''[3, 4, 5, 6,]''')
self.check('''[[1], 2, 3, 4, 5]''')
self.check('''[[1, 2], 3, 4, 5]''')
self.check('''[[1, 2, 3], 4, 5]''')
self.check('''[[1, 2, 3, 4], 5]''')
self.check('''[[1, 2, 3, 4, 5]]''')
self.check('''[[[1], 2, 3, 4, 5]]''')
self.check('''[[[1, 2], 3, 4, 5]]''')
self.check('''[[[1, 2, 3], 4, 5]]''')
self.check('''[[[1, 2, 3, 4], 5]]''')
self.check('''[[[1, 2, 3, 4, 5]]]''')
self.check('''[1, 2, 3, 4, [5]]''')
self.check('''[1, 2, 3, [4, 5]]''')
self.check('''[1, 2, [3, 4, 5]]''')
self.check('''[1, [2, 3, 4, 5]]''')
self.check('''[[1, 2, 3, 4, [5]]]''')
self.check('''[[1, 2, 3, [4, 5]]]''')
self.check('''[[1, 2, [3, 4, 5]]]''')
self.check('''[[1, [2, 3, 4, 5]]]''')
self.check('''3
''')
self.check('''3
''')
self.check('''3
''')
self.check('''3
''')
self.check('''
3''')
self.check('''
3''')
self.check('''
3''')
self.check('''
3''')
self.check('''a''')
self.check('''a.b''')
self.check('''a.b.c''')
self.check('''a.b.c.d''')
self.check('''a.b.c.d.e''')
self.check('''a[1]''')
self.check('''a[1][2]''')
self.check('''a[1][2][3]''')
self.check('''a[1][2][3][4]''')
self.check('''(9).stuff''')
self.check('''((9)).stuff''')
self.check('''(((9))).stuff''')
self.check('''a[1]''')
self.check('''a["hey"]''')
self.check('''a[1:2]''')
self.check('''a[:]''')
self.check('''a[1:]''')
self.check('''a[:1]''')
self.check('''a[::]''')
self.check('''a[1::]''')
self.check('''a[:1:]''')
self.check('''a[::1]''')
self.check('''a[1:2:]''')
self.check('''a[:1:2]''')
self.check('''a[1::2]''')
self.check('''a[1:2:3]''')
self.check('''a[1,]''')
self.check('''a["hey",]''')
self.check('''a[1:2,]''')
self.check('''a[:,]''')
self.check('''a[1:,]''')
self.check('''a[:1,]''')
self.check('''a[::,]''')
self.check('''a[1::,]''')
self.check('''a[:1:,]''')
self.check('''a[::1,]''')
self.check('''a[1:2:,]''')
self.check('''a[:1:2,]''')
self.check('''a[1::2,]''')
self.check('''a[1:2:3,]''')
self.check('''a[1,5]''')
self.check('''a["hey",5]''')
self.check('''a[1:2,5]''')
self.check('''a[:,5]''')
self.check('''a[1:,5]''')
self.check('''a[:1,5]''')
self.check('''a[::,5]''')
self.check('''a[1::,5]''')
self.check('''a[:1:,5]''')
self.check('''a[::1,5]''')
self.check('''a[1:2:,5]''')
self.check('''a[:1:2,5]''')
self.check('''a[1::2,5]''')
self.check('''a[1:2:3,5]''')
self.check('''a[1,5,]''')
self.check('''a["hey",5,]''')
self.check('''a[1:2,5,]''')
self.check('''a[:,5,]''')
self.check('''a[1:,5,]''')
self.check('''a[:1,5,]''')
self.check('''a[::,5,]''')
self.check('''a[1::,5,]''')
self.check('''a[:1:,5,]''')
self.check('''a[::1,5,]''')
self.check('''a[1:2:,5,]''')
self.check('''a[:1:2,5,]''')
self.check('''a[1::2,5,]''')
self.check('''a[1:2:3,5,]''')
self.check('''a[1,"a":"b"]''')
self.check('''a["hey","a":"b"]''')
self.check('''a[1:2,"a":"b"]''')
self.check('''a[:,"a":"b"]''')
self.check('''a[1:,"a":"b"]''')
self.check('''a[:1,"a":"b"]''')
self.check('''a[::,"a":"b"]''')
self.check('''a[1::,"a":"b"]''')
self.check('''a[:1:,"a":"b"]''')
self.check('''a[::1,"a":"b"]''')
self.check('''a[1:2:,"a":"b"]''')
self.check('''a[:1:2,"a":"b"]''')
self.check('''a[1::2,"a":"b"]''')
self.check('''a[1:2:3,"a":"b"]''')
self.check('''a[1,"a":"b",]''')
self.check('''a["hey","a":"b",]''')
self.check('''a[1:2,"a":"b",]''')
self.check('''a[:,"a":"b",]''')
self.check('''a[1:,"a":"b",]''')
self.check('''a[:1,"a":"b",]''')
self.check('''a[::,"a":"b",]''')
self.check('''a[1::,"a":"b",]''')
self.check('''a[:1:,"a":"b",]''')
self.check('''a[::1,"a":"b",]''')
self.check('''a[1:2:,"a":"b",]''')
self.check('''a[:1:2,"a":"b",]''')
self.check('''a[1::2,"a":"b",]''')
self.check('''a[1:2:3,"a":"b",]''')
self.check('''a[1,5,6]''')
self.check('''a["hey",5,6]''')
self.check('''a[1:2,5,6]''')
self.check('''a[:,5,6]''')
self.check('''a[1:,5,6]''')
self.check('''a[:1,5,6]''')
self.check('''a[::,5,6]''')
self.check('''a[1::,5,6]''')
self.check('''a[:1:,5,6]''')
self.check('''a[::1,5,6]''')
self.check('''a[1:2:,5,6]''')
self.check('''a[:1:2,5,6]''')
self.check('''a[1::2,5,6]''')
self.check('''a[1:2:3,5,6]''')
self.check('''a[1,5,6,]''')
self.check('''a["hey",5,6,]''')
self.check('''a[1:2,5,6,]''')
self.check('''a[:,5,6,]''')
self.check('''a[1:,5,6,]''')
self.check('''a[:1,5,6,]''')
self.check('''a[::,5,6,]''')
self.check('''a[1::,5,6,]''')
self.check('''a[:1:,5,6,]''')
self.check('''a[::1,5,6,]''')
self.check('''a[1:2:,5,6,]''')
self.check('''a[:1:2,5,6,]''')
self.check('''a[1::2,5,6,]''')
self.check('''a[1:2:3,5,6,]''')
self.check('''a[2].three''')
self.check('''a.three''')
self.check('''a[2]''')
self.check('''a.three[2]''')
self.check('''x and y''')
self.check('''x and y and z''')
self.check('''x and y and z and w''')
self.check('''not x''')
self.check('''not x and y''')
self.check('''x or y''')
self.check('''x or y and z''')
self.check('''x or y or z''')
self.check('''not x or y and z''')
self.check('''x or not y and z''')
self.check('''x or y and not z''')
self.check('''not x or not y and z''')
self.check('''not x or y and not z''')
self.check('''x or not y and not z''')
self.check('''not x or not y and not z''')
self.check('''x and y or z''')
self.check('''not x and y or z''')
self.check('''x and not y or z''')
self.check('''x and y or not z''')
self.check('''not x and not y or z''')
self.check('''not x and y or not z''')
self.check('''x and not y or not z''')
self.check('''x < y''')
self.check('''x > y''')
self.check('''x == y''')
self.check('''x >= y''')
self.check('''x <= y''')
self.check('''x != y''')
self.check('''x in y''')
self.check('''x not in y''')
self.check('''1 < y < 2''')
self.check('''1 < y == 2''')
self.check('''(x) < y''')
self.check('''(x) > y''')
self.check('''(x) == y''')
self.check('''(x) >= y''')
self.check('''(x) <= y''')
self.check('''(x) != y''')
self.check('''(x) in y''')
self.check('''(x) not in y''')
self.check('''(1) < y < 2''')
self.check('''(1) < y == 2''')
self.check('''x < (y)''')
self.check('''x > (y)''')
self.check('''x == (y)''')
self.check('''x >= (y)''')
self.check('''x <= (y)''')
self.check('''x != (y)''')
self.check('''x in (y)''')
self.check('''x not in (y)''')
self.check('''1 < (y) < 2''')
self.check('''1 < (y) == 2''')
self.check('''1 < y < (2)''')
self.check('''1 < y == (2)''')
self.check('''(x) < (y)''')
self.check('''(x) > (y)''')
self.check('''(x) == (y)''')
self.check('''(x) >= (y)''')
self.check('''(x) <= (y)''')
self.check('''(x) != (y)''')
self.check('''(x) in (y)''')
self.check('''(x) not in (y)''')
self.check('''(1) < (y) < 2''')
self.check('''(1) < (y) == 2''')
self.check('''(1) < y < (2)''')
self.check('''(1) < y == (2)''')
self.check('''x + y''')
self.check('''x + y + z''')
self.check('''x + y + z + w''')
self.check('''x - y''')
self.check('''x - y - z''')
self.check('''x - y - z - w''')
self.check('''x - y + z - w''')
self.check('''x * y''')
self.check('''x * y * z''')
self.check('''x * y * z * w''')
self.check('''x * y - z * w''')
self.check('''x / y''')
self.check('''x / y / z''')
self.check('''x / y / z / w''')
self.check('''x / y * z / w''')
self.check('''x % y''')
self.check('''x % y % z''')
self.check('''x % y % z % w''')
self.check('''x % y / z % w''')
self.check('''x // y''')
self.check('''x // y // z''')
self.check('''x // y // z // w''')
self.check('''x // y % z // w''')
self.check('''+x''')
self.check('''-x''')
self.check('''++x''')
self.check('''+-x''')
self.check('''-+x''')
self.check('''--x''')
self.check('''+x + y''')
self.check('''-x + y''')
self.check('''++x + y''')
self.check('''+-x + y''')
self.check('''-+x + y''')
self.check('''--x + y''')
self.check('''x + +x''')
self.check('''x + -x''')
self.check('''x + ++x''')
self.check('''x + +-x''')
self.check('''x + -+x''')
self.check('''x + --x''')
self.check('''x ** y''')
self.check('''x ** y ** z''')
self.check('''x ** y ** z ** w''')
self.check('''x ** y // z ** w''')
self.check('''x.y**2''')
def test_newforms(self):
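        # Femtocode-specific forms (anonymous functions with =>, def-style
        # definitions, assignments in suites, if/elif/else expressions and
        # $ arguments) are checked against hand-built parsingtree nodes.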
self.check('{x => x}', Suite(assignments=[], expression=FcnDef(parameters=[Name(id='x', ctx=Param())], defaults=[None], body=Suite(assignments=[], expression=Name(id='x', ctx=Load())))))
self.check('{x, y => x}', Suite(assignments=[], expression=FcnDef(parameters=[Name(id='x', ctx=Param()), Name(id='y', ctx=Param())], defaults=[None, None], body=Suite(assignments=[], expression=Name(id='x', ctx=Load())))))
self.check('{x, y, z => x}', Suite(assignments=[], expression=FcnDef(parameters=[Name(id='x', ctx=Param()), Name(id='y', ctx=Param()), Name(id='z', ctx=Param())], defaults=[None, None, None], body=Suite(assignments=[], expression=Name(id='x', ctx=Load())))))
self.check('{x, => x}', Suite(assignments=[], expression=FcnDef(parameters=[Name(id='x', ctx=Param())], defaults=[None], body=Suite(assignments=[], expression=Name(id='x', ctx=Load())))))
self.check('{x, y, => x}', Suite(assignments=[], expression=FcnDef(parameters=[Name(id='x', ctx=Param()), Name(id='y', ctx=Param())], defaults=[None, None], body=Suite(assignments=[], expression=Name(id='x', ctx=Load())))))
self.check('{x, y, z, => x}', Suite(assignments=[], expression=FcnDef(parameters=[Name(id='x', ctx=Param()), Name(id='y', ctx=Param()), Name(id='z', ctx=Param())], defaults=[None, None, None], body=Suite(assignments=[], expression=Name(id='x', ctx=Load())))))
self.check('{x=1 => x}', Suite(assignments=[], expression=FcnDef(parameters=[Name(id='x', ctx=Param())], defaults=[Num(n=1)], body=Suite(assignments=[], expression=Name(id='x', ctx=Load())))))
self.check('{x=1, y=1 => x}', Suite(assignments=[], expression=FcnDef(parameters=[Name(id='x', ctx=Param()), Name(id='y', ctx=Param())], defaults=[Num(n=1), Num(n=1)], body=Suite(assignments=[], expression=Name(id='x', ctx=Load())))))
self.check('{x=1, y=1, z=1 => x}', Suite(assignments=[], expression=FcnDef(parameters=[Name(id='x', ctx=Param()), Name(id='y', ctx=Param()), Name(id='z', ctx=Param())], defaults=[Num(n=1), Num(n=1), Num(n=1)], body=Suite(assignments=[], expression=Name(id='x', ctx=Load())))))
self.check('{x=1, => x}', Suite(assignments=[], expression=FcnDef(parameters=[Name(id='x', ctx=Param())], defaults=[Num(n=1)], body=Suite(assignments=[], expression=Name(id='x', ctx=Load())))))
self.check('{x=1, y=1, => x}', Suite(assignments=[], expression=FcnDef(parameters=[Name(id='x', ctx=Param()), Name(id='y', ctx=Param())], defaults=[Num(n=1), Num(n=1)], body=Suite(assignments=[], expression=Name(id='x', ctx=Load())))))
self.check('{x=1, y=1, z=1, => x}', Suite(assignments=[], expression=FcnDef(parameters=[Name(id='x', ctx=Param()), Name(id='y', ctx=Param()), Name(id='z', ctx=Param())], defaults=[Num(n=1), Num(n=1), Num(n=1)], body=Suite(assignments=[], expression=Name(id='x', ctx=Load())))))
self.check('f({x => x})', Suite(assignments=[], expression=FcnCall(function=Name(id='f', ctx=Load()), positional=[FcnDef(parameters=[Name(id='x', ctx=Param())], defaults=[None], body=Suite(assignments=[], expression=Name(id='x', ctx=Load())))], names=[], named=[])))
self.check('f({x, y => x})', Suite(assignments=[], expression=FcnCall(function=Name(id='f', ctx=Load()), positional=[FcnDef(parameters=[Name(id='x', ctx=Param()), Name(id='y', ctx=Param())], defaults=[None, None], body=Suite(assignments=[], expression=Name(id='x', ctx=Load())))], names=[], named=[])))
self.check('f({x, => x})', Suite(assignments=[], expression=FcnCall(function=Name(id='f', ctx=Load()), positional=[FcnDef(parameters=[Name(id='x', ctx=Param())], defaults=[None], body=Suite(assignments=[], expression=Name(id='x', ctx=Load())))], names=[], named=[])))
self.check('f({x, y, => x})', Suite(assignments=[], expression=FcnCall(function=Name(id='f', ctx=Load()), positional=[FcnDef(parameters=[Name(id='x', ctx=Param()), Name(id='y', ctx=Param())], defaults=[None, None], body=Suite(assignments=[], expression=Name(id='x', ctx=Load())))], names=[], named=[])))
self.check('f(x => x)', Suite(assignments=[], expression=FcnCall(function=Name(id='f', ctx=Load()), positional=[FcnDef(parameters=[Name(id='x', ctx=Param())], defaults=[None], body=Suite(assignments=[], expression=Name(id='x', ctx=Load())))], names=[], named=[])))
self.check('{x => x}()', Suite(assignments=[], expression=FcnCall(function=FcnDef(parameters=[Name(id='x', ctx=Param())], defaults=[None], body=Suite(assignments=[], expression=Name(id='x', ctx=Load()))), positional=[], names=[], named=[])))
self.check('{x, y => x}()', Suite(assignments=[], expression=FcnCall(function=FcnDef(parameters=[Name(id='x', ctx=Param()), Name(id='y', ctx=Param())], defaults=[None, None], body=Suite(assignments=[], expression=Name(id='x', ctx=Load()))), positional=[], names=[], named=[])))
self.check('{x, => x}()', Suite(assignments=[], expression=FcnCall(function=FcnDef(parameters=[Name(id='x', ctx=Param())], defaults=[None], body=Suite(assignments=[], expression=Name(id='x', ctx=Load()))), positional=[], names=[], named=[])))
self.check('{x, y, => x}()', Suite(assignments=[], expression=FcnCall(function=FcnDef(parameters=[Name(id='x', ctx=Param()), Name(id='y', ctx=Param())], defaults=[None, None], body=Suite(assignments=[], expression=Name(id='x', ctx=Load()))), positional=[], names=[], named=[])))
self.check('{x => x}(1)', Suite(assignments=[], expression=FcnCall(function=FcnDef(parameters=[Name(id='x', ctx=Param())], defaults=[None], body=Suite(assignments=[], expression=Name(id='x', ctx=Load()))), positional=[Num(n=1)], names=[], named=[])))
self.check('{x, y => x}(1)', Suite(assignments=[], expression=FcnCall(function=FcnDef(parameters=[Name(id='x', ctx=Param()), Name(id='y', ctx=Param())], defaults=[None, None], body=Suite(assignments=[], expression=Name(id='x', ctx=Load()))), positional=[Num(n=1)], names=[], named=[])))
self.check('{x, => x}(1)', Suite(assignments=[], expression=FcnCall(function=FcnDef(parameters=[Name(id='x', ctx=Param())], defaults=[None], body=Suite(assignments=[], expression=Name(id='x', ctx=Load()))), positional=[Num(n=1)], names=[], named=[])))
self.check('{x, y, => x}(1)', Suite(assignments=[], expression=FcnCall(function=FcnDef(parameters=[Name(id='x', ctx=Param()), Name(id='y', ctx=Param())], defaults=[None, None], body=Suite(assignments=[], expression=Name(id='x', ctx=Load()))), positional=[Num(n=1)], names=[], named=[])))
self.check('{x => x}(1,)', Suite(assignments=[], expression=FcnCall(function=FcnDef(parameters=[Name(id='x', ctx=Param())], defaults=[None], body=Suite(assignments=[], expression=Name(id='x', ctx=Load()))), positional=[Num(n=1)], names=[], named=[])))
self.check('{x, y => x}(1,)', Suite(assignments=[], expression=FcnCall(function=FcnDef(parameters=[Name(id='x', ctx=Param()), Name(id='y', ctx=Param())], defaults=[None, None], body=Suite(assignments=[], expression=Name(id='x', ctx=Load()))), positional=[Num(n=1)], names=[], named=[])))
self.check('{x, => x}(1,)', Suite(assignments=[], expression=FcnCall(function=FcnDef(parameters=[Name(id='x', ctx=Param())], defaults=[None], body=Suite(assignments=[], expression=Name(id='x', ctx=Load()))), positional=[Num(n=1)], names=[], named=[])))
self.check('{x, y, => x}(1,)', Suite(assignments=[], expression=FcnCall(function=FcnDef(parameters=[Name(id='x', ctx=Param()), Name(id='y', ctx=Param())], defaults=[None, None], body=Suite(assignments=[], expression=Name(id='x', ctx=Load()))), positional=[Num(n=1)], names=[], named=[])))
self.check('z = {x => x}; z', Suite(assignments=[Assignment(lvalues=[Name(id='z', ctx=Store())], expression=FcnDef(parameters=[Name(id='x', ctx=Param())], defaults=[None], body=Suite(assignments=[], expression=Name(id='x', ctx=Load()))))], expression=Name(id='z', ctx=Load())))
self.check('z = {x => x} z', Suite(assignments=[Assignment(lvalues=[Name(id='z', ctx=Store())], expression=FcnDef(parameters=[Name(id='x', ctx=Param())], defaults=[None], body=Suite(assignments=[], expression=Name(id='x', ctx=Load()))))], expression=Name(id='z', ctx=Load())))
self.check('z = {x, y => x}; z', Suite(assignments=[Assignment(lvalues=[Name(id='z', ctx=Store())], expression=FcnDef(parameters=[Name(id='x', ctx=Param()), Name(id='y', ctx=Param())], defaults=[None, None], body=Suite(assignments=[], expression=Name(id='x', ctx=Load()))))], expression=Name(id='z', ctx=Load())))
self.check('z = {x, y => x} z', Suite(assignments=[Assignment(lvalues=[Name(id='z', ctx=Store())], expression=FcnDef(parameters=[Name(id='x', ctx=Param()), Name(id='y', ctx=Param())], defaults=[None, None], body=Suite(assignments=[], expression=Name(id='x', ctx=Load()))))], expression=Name(id='z', ctx=Load())))
self.check('z = {x, => x}; z', Suite(assignments=[Assignment(lvalues=[Name(id='z', ctx=Store())], expression=FcnDef(parameters=[Name(id='x', ctx=Param())], defaults=[None], body=Suite(assignments=[], expression=Name(id='x', ctx=Load()))))], expression=Name(id='z', ctx=Load())))
self.check('z = {x, => x} z', Suite(assignments=[Assignment(lvalues=[Name(id='z', ctx=Store())], expression=FcnDef(parameters=[Name(id='x', ctx=Param())], defaults=[None], body=Suite(assignments=[], expression=Name(id='x', ctx=Load()))))], expression=Name(id='z', ctx=Load())))
self.check('z = {x, y, => x}; z', Suite(assignments=[Assignment(lvalues=[Name(id='z', ctx=Store())], expression=FcnDef(parameters=[Name(id='x', ctx=Param()), Name(id='y', ctx=Param())], defaults=[None, None], body=Suite(assignments=[], expression=Name(id='x', ctx=Load()))))], expression=Name(id='z', ctx=Load())))
self.check('z = {x, y, => x} z', Suite(assignments=[Assignment(lvalues=[Name(id='z', ctx=Store())], expression=FcnDef(parameters=[Name(id='x', ctx=Param()), Name(id='y', ctx=Param())], defaults=[None, None], body=Suite(assignments=[], expression=Name(id='x', ctx=Load()))))], expression=Name(id='z', ctx=Load())))
self.check('{x => x;}', Suite(assignments=[], expression=FcnDef(parameters=[Name(id='x', ctx=Param())], defaults=[None], body=Suite(assignments=[], expression=Name(id='x', ctx=Load())))))
self.check('{x => x;;}', Suite(assignments=[], expression=FcnDef(parameters=[Name(id='x', ctx=Param())], defaults=[None], body=Suite(assignments=[], expression=Name(id='x', ctx=Load())))))
self.check('{x => x;;;}', Suite(assignments=[], expression=FcnDef(parameters=[Name(id='x', ctx=Param())], defaults=[None], body=Suite(assignments=[], expression=Name(id='x', ctx=Load())))))
self.check('{x => ;x}', Suite(assignments=[], expression=FcnDef(parameters=[Name(id='x', ctx=Param())], defaults=[None], body=Suite(assignments=[], expression=Name(id='x', ctx=Load())))))
self.check('{x => ;;x}', Suite(assignments=[], expression=FcnDef(parameters=[Name(id='x', ctx=Param())], defaults=[None], body=Suite(assignments=[], expression=Name(id='x', ctx=Load())))))
self.check('{x => ;;;x}', Suite(assignments=[], expression=FcnDef(parameters=[Name(id='x', ctx=Param())], defaults=[None], body=Suite(assignments=[], expression=Name(id='x', ctx=Load())))))
self.check('{x => ;x;}', Suite(assignments=[], expression=FcnDef(parameters=[Name(id='x', ctx=Param())], defaults=[None], body=Suite(assignments=[], expression=Name(id='x', ctx=Load())))))
self.check('{x => ;;x;;}', Suite(assignments=[], expression=FcnDef(parameters=[Name(id='x', ctx=Param())], defaults=[None], body=Suite(assignments=[], expression=Name(id='x', ctx=Load())))))
self.check('{x => ;;;x;;;}', Suite(assignments=[], expression=FcnDef(parameters=[Name(id='x', ctx=Param())], defaults=[None], body=Suite(assignments=[], expression=Name(id='x', ctx=Load())))))
self.check('{x => y = x; x}', Suite(assignments=[], expression=FcnDef(parameters=[Name(id='x', ctx=Param())], defaults=[None], body=Suite(assignments=[Assignment(lvalues=[Name(id='y', ctx=Store())], expression=Name(id='x', ctx=Load()))], expression=Name(id='x', ctx=Load())))))
self.check('{x => y = x; x;}', Suite(assignments=[], expression=FcnDef(parameters=[Name(id='x', ctx=Param())], defaults=[None], body=Suite(assignments=[Assignment(lvalues=[Name(id='y', ctx=Store())], expression=Name(id='x', ctx=Load()))], expression=Name(id='x', ctx=Load())))))
self.check('{x => y = x; x}', Suite(assignments=[], expression=FcnDef(parameters=[Name(id='x', ctx=Param())], defaults=[None], body=Suite(assignments=[Assignment(lvalues=[Name(id='y', ctx=Store())], expression=Name(id='x', ctx=Load()))], expression=Name(id='x', ctx=Load())))))
self.check('{x => y = x; x;}', Suite(assignments=[], expression=FcnDef(parameters=[Name(id='x', ctx=Param())], defaults=[None], body=Suite(assignments=[Assignment(lvalues=[Name(id='y', ctx=Store())], expression=Name(id='x', ctx=Load()))], expression=Name(id='x', ctx=Load())))))
self.check('{x => y = x;; x;;}', Suite(assignments=[], expression=FcnDef(parameters=[Name(id='x', ctx=Param())], defaults=[None], body=Suite(assignments=[Assignment(lvalues=[Name(id='y', ctx=Store())], expression=Name(id='x', ctx=Load()))], expression=Name(id='x', ctx=Load())))))
self.check('{x => y = x;;; x;;;}', Suite(assignments=[], expression=FcnDef(parameters=[Name(id='x', ctx=Param())], defaults=[None], body=Suite(assignments=[Assignment(lvalues=[Name(id='y', ctx=Store())], expression=Name(id='x', ctx=Load()))], expression=Name(id='x', ctx=Load())))))
self.check('{x => y = x; z=y; z}', Suite(assignments=[], expression=FcnDef(parameters=[Name(id='x', ctx=Param())], defaults=[None], body=Suite(assignments=[Assignment(lvalues=[Name(id='y', ctx=Store())], expression=Name(id='x', ctx=Load())), Assignment(lvalues=[Name(id='z', ctx=Store())], expression=Name(id='y', ctx=Load()))], expression=Name(id='z', ctx=Load())))))
self.check('{x => y = x; z=y; z;}', Suite(assignments=[], expression=FcnDef(parameters=[Name(id='x', ctx=Param())], defaults=[None], body=Suite(assignments=[Assignment(lvalues=[Name(id='y', ctx=Store())], expression=Name(id='x', ctx=Load())), Assignment(lvalues=[Name(id='z', ctx=Store())], expression=Name(id='y', ctx=Load()))], expression=Name(id='z', ctx=Load())))))
self.check('{x => y = x; z=y; z}', Suite(assignments=[], expression=FcnDef(parameters=[Name(id='x', ctx=Param())], defaults=[None], body=Suite(assignments=[Assignment(lvalues=[Name(id='y', ctx=Store())], expression=Name(id='x', ctx=Load())), Assignment(lvalues=[Name(id='z', ctx=Store())], expression=Name(id='y', ctx=Load()))], expression=Name(id='z', ctx=Load())))))
self.check('{x => y = x; z=y; z;}', Suite(assignments=[], expression=FcnDef(parameters=[Name(id='x', ctx=Param())], defaults=[None], body=Suite(assignments=[Assignment(lvalues=[Name(id='y', ctx=Store())], expression=Name(id='x', ctx=Load())), Assignment(lvalues=[Name(id='z', ctx=Store())], expression=Name(id='y', ctx=Load()))], expression=Name(id='z', ctx=Load())))))
self.check('{x => y = x; z=y; z;;}', Suite(assignments=[], expression=FcnDef(parameters=[Name(id='x', ctx=Param())], defaults=[None], body=Suite(assignments=[Assignment(lvalues=[Name(id='y', ctx=Store())], expression=Name(id='x', ctx=Load())), Assignment(lvalues=[Name(id='z', ctx=Store())], expression=Name(id='y', ctx=Load()))], expression=Name(id='z', ctx=Load())))))
self.check('{x => y = x;;; z=y;;; z;;;}', Suite(assignments=[], expression=FcnDef(parameters=[Name(id='x', ctx=Param())], defaults=[None], body=Suite(assignments=[Assignment(lvalues=[Name(id='y', ctx=Store())], expression=Name(id='x', ctx=Load())), Assignment(lvalues=[Name(id='z', ctx=Store())], expression=Name(id='y', ctx=Load()))], expression=Name(id='z', ctx=Load())))))
self.check('f()', Suite(assignments=[], expression=FcnCall(function=Name(id='f', ctx=Load()), positional=[], names=[], named=[])))
self.check('f(x)', Suite(assignments=[], expression=FcnCall(function=Name(id='f', ctx=Load()), positional=[Name(id='x', ctx=Load())], names=[], named=[])))
self.check('f(x,)', Suite(assignments=[], expression=FcnCall(function=Name(id='f', ctx=Load()), positional=[Name(id='x', ctx=Load())], names=[], named=[])))
self.check('f(x, y)', Suite(assignments=[], expression=FcnCall(function=Name(id='f', ctx=Load()), positional=[Name(id='x', ctx=Load()), Name(id='y', ctx=Load())], names=[], named=[])))
self.check('f(x, y,)', Suite(assignments=[], expression=FcnCall(function=Name(id='f', ctx=Load()), positional=[Name(id='x', ctx=Load()), Name(id='y', ctx=Load())], names=[], named=[])))
self.check('f(x, y, z)', Suite(assignments=[], expression=FcnCall(function=Name(id='f', ctx=Load()), positional=[Name(id='x', ctx=Load()), Name(id='y', ctx=Load()), Name(id='z', ctx=Load())], names=[], named=[])))
self.check('f(x, y, z,)', Suite(assignments=[], expression=FcnCall(function=Name(id='f', ctx=Load()), positional=[Name(id='x', ctx=Load()), Name(id='y', ctx=Load()), Name(id='z', ctx=Load())], names=[], named=[])))
self.check('f(x=1)', Suite(assignments=[], expression=FcnCall(function=Name(id='f', ctx=Load()), positional=[], names=[Name(id='x', ctx=Param())], named=[Num(n=1)])))
self.check('f(x=1,)', Suite(assignments=[], expression=FcnCall(function=Name(id='f', ctx=Load()), positional=[], names=[Name(id='x', ctx=Param())], named=[Num(n=1)])))
self.check('f(x=1, y)', Suite(assignments=[], expression=FcnCall(function=Name(id='f', ctx=Load()), positional=[Name(id='y', ctx=Load())], names=[Name(id='x', ctx=Param())], named=[Num(n=1)])))
self.check('f(x, y=1,)', Suite(assignments=[], expression=FcnCall(function=Name(id='f', ctx=Load()), positional=[Name(id='x', ctx=Load())], names=[Name(id='y', ctx=Param())], named=[Num(n=1)])))
self.check('f(x, y, z=1)', Suite(assignments=[], expression=FcnCall(function=Name(id='f', ctx=Load()), positional=[Name(id='x', ctx=Load()), Name(id='y', ctx=Load())], names=[Name(id='z', ctx=Param())], named=[Num(n=1)])))
self.check('f(x, y=1, z,)', Suite(assignments=[], expression=FcnCall(function=Name(id='f', ctx=Load()), positional=[Name(id='x', ctx=Load()), Name(id='z', ctx=Load())], names=[Name(id='y', ctx=Param())], named=[Num(n=1)])))
self.check('x = 1; x', Suite(assignments=[Assignment(lvalues=[Name(id='x', ctx=Store())], expression=Num(n=1))], expression=Name(id='x', ctx=Load())))
self.check('def f(x): x; x', Suite(assignments=[Assignment(lvalues=[Name(id='f', ctx=Store())], expression=FcnDef(parameters=[Name(id='x', ctx=Param())], defaults=[None], body=Suite(assignments=[], expression=Name(id='x', ctx=Load()))))], expression=Name(id='x', ctx=Load())))
self.check('def f(x): x; x', Suite(assignments=[Assignment(lvalues=[Name(id='f', ctx=Store())], expression=FcnDef(parameters=[Name(id='x', ctx=Param())], defaults=[None], body=Suite(assignments=[], expression=Name(id='x', ctx=Load()))))], expression=Name(id='x', ctx=Load())))
self.check('def f(x) {x} x', Suite(assignments=[Assignment(lvalues=[Name(id='f', ctx=Store())], expression=FcnDef(parameters=[Name(id='x', ctx=Param())], defaults=[None], body=Suite(assignments=[], expression=Name(id='x', ctx=Load()))))], expression=Name(id='x', ctx=Load())))
self.check('def f(x) {x}; x', Suite(assignments=[Assignment(lvalues=[Name(id='f', ctx=Store())], expression=FcnDef(parameters=[Name(id='x', ctx=Param())], defaults=[None], body=Suite(assignments=[], expression=Name(id='x', ctx=Load()))))], expression=Name(id='x', ctx=Load())))
self.check('def f(x,): x; x', Suite(assignments=[Assignment(lvalues=[Name(id='f', ctx=Store())], expression=FcnDef(parameters=[Name(id='x', ctx=Param())], defaults=[None], body=Suite(assignments=[], expression=Name(id='x', ctx=Load()))))], expression=Name(id='x', ctx=Load())))
self.check('def f(x,) {x} x', Suite(assignments=[Assignment(lvalues=[Name(id='f', ctx=Store())], expression=FcnDef(parameters=[Name(id='x', ctx=Param())], defaults=[None], body=Suite(assignments=[], expression=Name(id='x', ctx=Load()))))], expression=Name(id='x', ctx=Load())))
self.check('def f(x,) {x}; x', Suite(assignments=[Assignment(lvalues=[Name(id='f', ctx=Store())], expression=FcnDef(parameters=[Name(id='x', ctx=Param())], defaults=[None], body=Suite(assignments=[], expression=Name(id='x', ctx=Load()))))], expression=Name(id='x', ctx=Load())))
self.check('def f(x, y): x; x', Suite(assignments=[Assignment(lvalues=[Name(id='f', ctx=Store())], expression=FcnDef(parameters=[Name(id='x', ctx=Param()), Name(id='y', ctx=Param())], defaults=[None, None], body=Suite(assignments=[], expression=Name(id='x', ctx=Load()))))], expression=Name(id='x', ctx=Load())))
self.check('def f(x, y,) {x} x', Suite(assignments=[Assignment(lvalues=[Name(id='f', ctx=Store())], expression=FcnDef(parameters=[Name(id='x', ctx=Param()), Name(id='y', ctx=Param())], defaults=[None, None], body=Suite(assignments=[], expression=Name(id='x', ctx=Load()))))], expression=Name(id='x', ctx=Load())))
self.check('def f(x, y,) {x}; x', Suite(assignments=[Assignment(lvalues=[Name(id='f', ctx=Store())], expression=FcnDef(parameters=[Name(id='x', ctx=Param()), Name(id='y', ctx=Param())], defaults=[None, None], body=Suite(assignments=[], expression=Name(id='x', ctx=Load()))))], expression=Name(id='x', ctx=Load())))
self.check('def f(x, y,) {x = y; x} x', Suite(assignments=[Assignment(lvalues=[Name(id='f', ctx=Store())], expression=FcnDef(parameters=[Name(id='x', ctx=Param()), Name(id='y', ctx=Param())], defaults=[None, None], body=Suite(assignments=[Assignment(lvalues=[Name(id='x', ctx=Store())], expression=Name(id='y', ctx=Load()))], expression=Name(id='x', ctx=Load()))))], expression=Name(id='x', ctx=Load())))
self.check('def f(x, y,) {x = y; x}; x', Suite(assignments=[Assignment(lvalues=[Name(id='f', ctx=Store())], expression=FcnDef(parameters=[Name(id='x', ctx=Param()), Name(id='y', ctx=Param())], defaults=[None, None], body=Suite(assignments=[Assignment(lvalues=[Name(id='x', ctx=Store())], expression=Name(id='y', ctx=Load()))], expression=Name(id='x', ctx=Load()))))], expression=Name(id='x', ctx=Load())))
self.check('$', Suite(assignments=[], expression=AtArg(num=None)))
self.check('f($)', Suite(assignments=[], expression=FcnCall(function=Name(id='f', ctx=Load()), positional=[AtArg(num=None)], names=[], named=[])))
self.check('$1', Suite(assignments=[], expression=AtArg(num=1)))
self.check('f($1)', Suite(assignments=[], expression=FcnCall(function=Name(id='f', ctx=Load()), positional=[AtArg(num=1)], names=[], named=[])))
self.check('0x123', Suite(assignments=[], expression=Num(n=291)))
self.check('0o123', Suite(assignments=[], expression=Num(n=83)))
self.check('3+4j', Suite(assignments=[], expression=BinOp(left=Num(n=3), op=Add(), right=Num(n=4j))))
self.check('x = 1; x', Suite(assignments=[Assignment(lvalues=[Name(id='x', ctx=Store())], expression=Num(n=1))], expression=Name(id='x', ctx=Load())))
self.check(';x = 1; x', Suite(assignments=[Assignment(lvalues=[Name(id='x', ctx=Store())], expression=Num(n=1))], expression=Name(id='x', ctx=Load())))
self.check('x = 1; x;', Suite(assignments=[Assignment(lvalues=[Name(id='x', ctx=Store())], expression=Num(n=1))], expression=Name(id='x', ctx=Load())))
self.check(';x = 1; x;', Suite(assignments=[Assignment(lvalues=[Name(id='x', ctx=Store())], expression=Num(n=1))], expression=Name(id='x', ctx=Load())))
self.check(';;x = 1;; x;;', Suite(assignments=[Assignment(lvalues=[Name(id='x', ctx=Store())], expression=Num(n=1))], expression=Name(id='x', ctx=Load())))
self.check(';;;x = 1;;; x;;;', Suite(assignments=[Assignment(lvalues=[Name(id='x', ctx=Store())], expression=Num(n=1))], expression=Name(id='x', ctx=Load())))
self.check('x, = 1; x', Suite(assignments=[Assignment(lvalues=[Name(id='x', ctx=Store())], expression=Num(n=1))], expression=Name(id='x', ctx=Load())))
self.check('x, y = 1; x', Suite(assignments=[Assignment(lvalues=[Name(id='x', ctx=Store()), Name(id='y', ctx=Store())], expression=Num(n=1))], expression=Name(id='x', ctx=Load())))
self.check('x, y, = 1; x', Suite(assignments=[Assignment(lvalues=[Name(id='x', ctx=Store()), Name(id='y', ctx=Store())], expression=Num(n=1))], expression=Name(id='x', ctx=Load())))
self.check('x, y, z = 1; x', Suite(assignments=[Assignment(lvalues=[Name(id='x', ctx=Store()), Name(id='y', ctx=Store()), Name(id='z', ctx=Store())], expression=Num(n=1))], expression=Name(id='x', ctx=Load())))
self.check('x, y, z, = 1; x', Suite(assignments=[Assignment(lvalues=[Name(id='x', ctx=Store()), Name(id='y', ctx=Store()), Name(id='z', ctx=Store())], expression=Num(n=1))], expression=Name(id='x', ctx=Load())))
self.check('if true: 1 else: 2', Suite(assignments=[], expression=IfChain(predicates=[Name(id='true', ctx=Load())], consequents=[Suite(assignments=[], expression=Num(n=1))], alternate=Suite(assignments=[], expression=Num(n=2)))))
self.check('if true: {1} else: {2}', Suite(assignments=[], expression=IfChain(predicates=[Name(id='true', ctx=Load())], consequents=[Suite(assignments=[], expression=Num(n=1))], alternate=Suite(assignments=[], expression=Num(n=2)))))
self.check('if true {1} else {2}', Suite(assignments=[], expression=IfChain(predicates=[Name(id='true', ctx=Load())], consequents=[Suite(assignments=[], expression=Num(n=1))], alternate=Suite(assignments=[], expression=Num(n=2)))))
self.check('if true {x = 1; x} else {y = 2; y}', Suite(assignments=[], expression=IfChain(predicates=[Name(id='true', ctx=Load())], consequents=[Suite(assignments=[Assignment(lvalues=[Name(id='x', ctx=Store())], expression=Num(n=1))], expression=Name(id='x', ctx=Load()))], alternate=Suite(assignments=[Assignment(lvalues=[Name(id='y', ctx=Store())], expression=Num(n=2))], expression=Name(id='y', ctx=Load())))))
self.check('if true: 1 elif false: 2 else: 3', Suite(assignments=[], expression=IfChain(predicates=[Name(id='true', ctx=Load()), Name(id='false', ctx=Load())], consequents=[Suite(assignments=[], expression=Num(n=1)), Suite(assignments=[], expression=Num(n=2))], alternate=Suite(assignments=[], expression=Num(n=3)))))
self.check('if true: {1} elif false: {2} else: {3}', Suite(assignments=[], expression=IfChain(predicates=[Name(id='true', ctx=Load()), Name(id='false', ctx=Load())], consequents=[Suite(assignments=[], expression=Num(n=1)), Suite(assignments=[], expression=Num(n=2))], alternate=Suite(assignments=[], expression=Num(n=3)))))
self.check('if true {1} elif false {2} else {3}', Suite(assignments=[], expression=IfChain(predicates=[Name(id='true', ctx=Load()), Name(id='false', ctx=Load())], consequents=[Suite(assignments=[], expression=Num(n=1)), Suite(assignments=[], expression=Num(n=2))], alternate=Suite(assignments=[], expression=Num(n=3)))))
self.check('if true: 1 elif false: 2 elif true: 3 else: 4', Suite(assignments=[], expression=IfChain(predicates=[Name(id='true', ctx=Load()), Name(id='false', ctx=Load()), Name(id='true', ctx=Load())], consequents=[Suite(assignments=[], expression=Num(n=1)), Suite(assignments=[], expression=Num(n=2)), Suite(assignments=[], expression=Num(n=3))], alternate=Suite(assignments=[], expression=Num(n=4)))))
self.check('x = if true: 1 else: 2; x', Suite(assignments=[Assignment(lvalues=[Name(id='x', ctx=Store())], expression=IfChain(predicates=[Name(id='true', ctx=Load())], consequents=[Suite(assignments=[], expression=Num(n=1))], alternate=Suite(assignments=[], expression=Num(n=2))))], expression=Name(id='x', ctx=Load())))
self.check('x = if true: {1} else: {2}; x', Suite(assignments=[Assignment(lvalues=[Name(id='x', ctx=Store())], expression=IfChain(predicates=[Name(id='true', ctx=Load())], consequents=[Suite(assignments=[], expression=Num(n=1))], alternate=Suite(assignments=[], expression=Num(n=2))))], expression=Name(id='x', ctx=Load())))
self.check('x = if true: {1} else: {2} x', Suite(assignments=[Assignment(lvalues=[Name(id='x', ctx=Store())], expression=IfChain(predicates=[Name(id='true', ctx=Load())], consequents=[Suite(assignments=[], expression=Num(n=1))], alternate=Suite(assignments=[], expression=Num(n=2))))], expression=Name(id='x', ctx=Load())))
self.check('x = if true {1} else {2}; x', Suite(assignments=[Assignment(lvalues=[Name(id='x', ctx=Store())], expression=IfChain(predicates=[Name(id='true', ctx=Load())], consequents=[Suite(assignments=[], expression=Num(n=1))], alternate=Suite(assignments=[], expression=Num(n=2))))], expression=Name(id='x', ctx=Load())))
self.check('x = if true {1} else {2} x', Suite(assignments=[Assignment(lvalues=[Name(id='x', ctx=Store())], expression=IfChain(predicates=[Name(id='true', ctx=Load())], consequents=[Suite(assignments=[], expression=Num(n=1))], alternate=Suite(assignments=[], expression=Num(n=2))))], expression=Name(id='x', ctx=Load())))
self.check('x = if true {x = 1; x} else {y = 2; y}; x', Suite(assignments=[Assignment(lvalues=[Name(id='x', ctx=Store())], expression=IfChain(predicates=[Name(id='true', ctx=Load())], consequents=[Suite(assignments=[Assignment(lvalues=[Name(id='x', ctx=Store())], expression=Num(n=1))], expression=Name(id='x', ctx=Load()))], alternate=Suite(assignments=[Assignment(lvalues=[Name(id='y', ctx=Store())], expression=Num(n=2))], expression=Name(id='y', ctx=Load()))))], expression=Name(id='x', ctx=Load())))
self.check('x = if true {x = 1; x} else {y = 2; y} x', Suite(assignments=[Assignment(lvalues=[Name(id='x', ctx=Store())], expression=IfChain(predicates=[Name(id='true', ctx=Load())], consequents=[Suite(assignments=[Assignment(lvalues=[Name(id='x', ctx=Store())], expression=Num(n=1))], expression=Name(id='x', ctx=Load()))], alternate=Suite(assignments=[Assignment(lvalues=[Name(id='y', ctx=Store())], expression=Num(n=2))], expression=Name(id='y', ctx=Load()))))], expression=Name(id='x', ctx=Load())))
self.check('x = if true: 1 elif false: 2 else: 3; x', Suite(assignments=[Assignment(lvalues=[Name(id='x', ctx=Store())], expression=IfChain(predicates=[Name(id='true', ctx=Load()), Name(id='false', ctx=Load())], consequents=[Suite(assignments=[], expression=Num(n=1)), Suite(assignments=[], expression=Num(n=2))], alternate=Suite(assignments=[], expression=Num(n=3))))], expression=Name(id='x', ctx=Load())))
self.check('x = if true: {1} elif false: {2} else: {3}; x', Suite(assignments=[Assignment(lvalues=[Name(id='x', ctx=Store())], expression=IfChain(predicates=[Name(id='true', ctx=Load()), Name(id='false', ctx=Load())], consequents=[Suite(assignments=[], expression=Num(n=1)), Suite(assignments=[], expression=Num(n=2))], alternate=Suite(assignments=[], expression=Num(n=3))))], expression=Name(id='x', ctx=Load())))
self.check('x = if true: {1} elif false: {2} else: {3} x', Suite(assignments=[Assignment(lvalues=[Name(id='x', ctx=Store())], expression=IfChain(predicates=[Name(id='true', ctx=Load()), Name(id='false', ctx=Load())], consequents=[Suite(assignments=[], expression=Num(n=1)), Suite(assignments=[], expression=Num(n=2))], alternate=Suite(assignments=[], expression=Num(n=3))))], expression=Name(id='x', ctx=Load())))
self.check('x = if true {1} elif false {2} else {3}; x', Suite(assignments=[Assignment(lvalues=[Name(id='x', ctx=Store())], expression=IfChain(predicates=[Name(id='true', ctx=Load()), Name(id='false', ctx=Load())], consequents=[Suite(assignments=[], expression=Num(n=1)), Suite(assignments=[], expression=Num(n=2))], alternate=Suite(assignments=[], expression=Num(n=3))))], expression=Name(id='x', ctx=Load())))
self.check('x = if true {1} elif false {2} else {3} x', Suite(assignments=[Assignment(lvalues=[Name(id='x', ctx=Store())], expression=IfChain(predicates=[Name(id='true', ctx=Load()), Name(id='false', ctx=Load())], consequents=[Suite(assignments=[], expression=Num(n=1)), Suite(assignments=[], expression=Num(n=2))], alternate=Suite(assignments=[], expression=Num(n=3))))], expression=Name(id='x', ctx=Load())))
self.check('x = if true: 1 elif false: 2 elif true: 3 else: 4; x', Suite(assignments=[Assignment(lvalues=[Name(id='x', ctx=Store())], expression=IfChain(predicates=[Name(id='true', ctx=Load()), Name(id='false', ctx=Load()), Name(id='true', ctx=Load())], consequents=[Suite(assignments=[], expression=Num(n=1)), Suite(assignments=[], expression=Num(n=2)), Suite(assignments=[], expression=Num(n=3))], alternate=Suite(assignments=[], expression=Num(n=4))))], expression=Name(id='x', ctx=Load())))
self.check('x = if true {;x = 1; x} else {y = 2; y}; x', Suite(assignments=[Assignment(lvalues=[Name(id='x', ctx=Store())], expression=IfChain(predicates=[Name(id='true', ctx=Load())], consequents=[Suite(assignments=[Assignment(lvalues=[Name(id='x', ctx=Store())], expression=Num(n=1))], expression=Name(id='x', ctx=Load()))], alternate=Suite(assignments=[Assignment(lvalues=[Name(id='y', ctx=Store())], expression=Num(n=2))], expression=Name(id='y', ctx=Load()))))], expression=Name(id='x', ctx=Load())))
self.check('x = if true {;x = 1;; x} else {y = 2; y}; x', Suite(assignments=[Assignment(lvalues=[Name(id='x', ctx=Store())], expression=IfChain(predicates=[Name(id='true', ctx=Load())], consequents=[Suite(assignments=[Assignment(lvalues=[Name(id='x', ctx=Store())], expression=Num(n=1))], expression=Name(id='x', ctx=Load()))], alternate=Suite(assignments=[Assignment(lvalues=[Name(id='y', ctx=Store())], expression=Num(n=2))], expression=Name(id='y', ctx=Load()))))], expression=Name(id='x', ctx=Load())))
self.check('x = if true {;;x = 1;; x} else {y = 2; y}; x', Suite(assignments=[Assignment(lvalues=[Name(id='x', ctx=Store())], expression=IfChain(predicates=[Name(id='true', ctx=Load())], consequents=[Suite(assignments=[Assignment(lvalues=[Name(id='x', ctx=Store())], expression=Num(n=1))], expression=Name(id='x', ctx=Load()))], alternate=Suite(assignments=[Assignment(lvalues=[Name(id='y', ctx=Store())], expression=Num(n=2))], expression=Name(id='y', ctx=Load()))))], expression=Name(id='x', ctx=Load())))
self.check('x = if true {;;;x = 1;;; x} else {y = 2; y}; x', Suite(assignments=[Assignment(lvalues=[Name(id='x', ctx=Store())], expression=IfChain(predicates=[Name(id='true', ctx=Load())], consequents=[Suite(assignments=[Assignment(lvalues=[Name(id='x', ctx=Store())], expression=Num(n=1))], expression=Name(id='x', ctx=Load()))], alternate=Suite(assignments=[Assignment(lvalues=[Name(id='y', ctx=Store())], expression=Num(n=2))], expression=Name(id='y', ctx=Load()))))], expression=Name(id='x', ctx=Load())))
self.check('x = if true: {;x = 1; x} else {y = 2; y}; x', Suite(assignments=[Assignment(lvalues=[Name(id='x', ctx=Store())], expression=IfChain(predicates=[Name(id='true', ctx=Load())], consequents=[Suite(assignments=[Assignment(lvalues=[Name(id='x', ctx=Store())], expression=Num(n=1))], expression=Name(id='x', ctx=Load()))], alternate=Suite(assignments=[Assignment(lvalues=[Name(id='y', ctx=Store())], expression=Num(n=2))], expression=Name(id='y', ctx=Load()))))], expression=Name(id='x', ctx=Load())))
self.check('x = if true: {;x = 1;; x} else {y = 2; y}; x', Suite(assignments=[Assignment(lvalues=[Name(id='x', ctx=Store())], expression=IfChain(predicates=[Name(id='true', ctx=Load())], consequents=[Suite(assignments=[Assignment(lvalues=[Name(id='x', ctx=Store())], expression=Num(n=1))], expression=Name(id='x', ctx=Load()))], alternate=Suite(assignments=[Assignment(lvalues=[Name(id='y', ctx=Store())], expression=Num(n=2))], expression=Name(id='y', ctx=Load()))))], expression=Name(id='x', ctx=Load())))
self.check('x = if true: {;;x = 1;; x} else {y = 2; y}; x', Suite(assignments=[Assignment(lvalues=[Name(id='x', ctx=Store())], expression=IfChain(predicates=[Name(id='true', ctx=Load())], consequents=[Suite(assignments=[Assignment(lvalues=[Name(id='x', ctx=Store())], expression=Num(n=1))], expression=Name(id='x', ctx=Load()))], alternate=Suite(assignments=[Assignment(lvalues=[Name(id='y', ctx=Store())], expression=Num(n=2))], expression=Name(id='y', ctx=Load()))))], expression=Name(id='x', ctx=Load())))
self.check('x = if true: {;;;x = 1;;; x} else {y = 2; y}; x', Suite(assignments=[Assignment(lvalues=[Name(id='x', ctx=Store())], expression=IfChain(predicates=[Name(id='true', ctx=Load())], consequents=[Suite(assignments=[Assignment(lvalues=[Name(id='x', ctx=Store())], expression=Num(n=1))], expression=Name(id='x', ctx=Load()))], alternate=Suite(assignments=[Assignment(lvalues=[Name(id='y', ctx=Store())], expression=Num(n=2))], expression=Name(id='y', ctx=Load()))))], expression=Name(id='x', ctx=Load())))
self.check('x = if true {x = 1; x} else {;y = 2; y} x', Suite(assignments=[Assignment(lvalues=[Name(id='x', ctx=Store())], expression=IfChain(predicates=[Name(id='true', ctx=Load())], consequents=[Suite(assignments=[Assignment(lvalues=[Name(id='x', ctx=Store())], expression=Num(n=1))], expression=Name(id='x', ctx=Load()))], alternate=Suite(assignments=[Assignment(lvalues=[Name(id='y', ctx=Store())], expression=Num(n=2))], expression=Name(id='y', ctx=Load()))))], expression=Name(id='x', ctx=Load())))
self.check('x = if true {x = 1; x} else {;y = 2;; y} x', Suite(assignments=[Assignment(lvalues=[Name(id='x', ctx=Store())], expression=IfChain(predicates=[Name(id='true', ctx=Load())], consequents=[Suite(assignments=[Assignment(lvalues=[Name(id='x', ctx=Store())], expression=Num(n=1))], expression=Name(id='x', ctx=Load()))], alternate=Suite(assignments=[Assignment(lvalues=[Name(id='y', ctx=Store())], expression=Num(n=2))], expression=Name(id='y', ctx=Load()))))], expression=Name(id='x', ctx=Load())))
self.check('x = if true {x = 1; x} else {;;y = 2;; y} x', Suite(assignments=[Assignment(lvalues=[Name(id='x', ctx=Store())], expression=IfChain(predicates=[Name(id='true', ctx=Load())], consequents=[Suite(assignments=[Assignment(lvalues=[Name(id='x', ctx=Store())], expression=Num(n=1))], expression=Name(id='x', ctx=Load()))], alternate=Suite(assignments=[Assignment(lvalues=[Name(id='y', ctx=Store())], expression=Num(n=2))], expression=Name(id='y', ctx=Load()))))], expression=Name(id='x', ctx=Load())))
self.check('x = if true {x = 1; x} else {;;;y = 2;;; y} x', Suite(assignments=[Assignment(lvalues=[Name(id='x', ctx=Store())], expression=IfChain(predicates=[Name(id='true', ctx=Load())], consequents=[Suite(assignments=[Assignment(lvalues=[Name(id='x', ctx=Store())], expression=Num(n=1))], expression=Name(id='x', ctx=Load()))], alternate=Suite(assignments=[Assignment(lvalues=[Name(id='y', ctx=Store())], expression=Num(n=2))], expression=Name(id='y', ctx=Load()))))], expression=Name(id='x', ctx=Load())))
self.check('x = if true {x = 1; x} else: {;y = 2; y} x', Suite(assignments=[Assignment(lvalues=[Name(id='x', ctx=Store())], expression=IfChain(predicates=[Name(id='true', ctx=Load())], consequents=[Suite(assignments=[Assignment(lvalues=[Name(id='x', ctx=Store())], expression=Num(n=1))], expression=Name(id='x', ctx=Load()))], alternate=Suite(assignments=[Assignment(lvalues=[Name(id='y', ctx=Store())], expression=Num(n=2))], expression=Name(id='y', ctx=Load()))))], expression=Name(id='x', ctx=Load())))
self.check('x = if true {x = 1; x} else: {;y = 2;; y} x', Suite(assignments=[Assignment(lvalues=[Name(id='x', ctx=Store())], expression=IfChain(predicates=[Name(id='true', ctx=Load())], consequents=[Suite(assignments=[Assignment(lvalues=[Name(id='x', ctx=Store())], expression=Num(n=1))], expression=Name(id='x', ctx=Load()))], alternate=Suite(assignments=[Assignment(lvalues=[Name(id='y', ctx=Store())], expression=Num(n=2))], expression=Name(id='y', ctx=Load()))))], expression=Name(id='x', ctx=Load())))
self.check('x = if true {x = 1; x} else: {;;y = 2;; y} x', Suite(assignments=[Assignment(lvalues=[Name(id='x', ctx=Store())], expression=IfChain(predicates=[Name(id='true', ctx=Load())], consequents=[Suite(assignments=[Assignment(lvalues=[Name(id='x', ctx=Store())], expression=Num(n=1))], expression=Name(id='x', ctx=Load()))], alternate=Suite(assignments=[Assignment(lvalues=[Name(id='y', ctx=Store())], expression=Num(n=2))], expression=Name(id='y', ctx=Load()))))], expression=Name(id='x', ctx=Load())))
self.check('x = if true {x = 1; x} else: {;;;y = 2;;; y} x', Suite(assignments=[Assignment(lvalues=[Name(id='x', ctx=Store())], expression=IfChain(predicates=[Name(id='true', ctx=Load())], consequents=[Suite(assignments=[Assignment(lvalues=[Name(id='x', ctx=Store())], expression=Num(n=1))], expression=Name(id='x', ctx=Load()))], alternate=Suite(assignments=[Assignment(lvalues=[Name(id='y', ctx=Store())], expression=Num(n=2))], expression=Name(id='y', ctx=Load()))))], expression=Name(id='x', ctx=Load())))
self.check("{=> 3.14}", Suite(assignments=[], expression=FcnDef(parameters=[], defaults=[], body=Suite(assignments=[], expression=Num(n=3.14)))))
self.check("{=> 3.14;}", Suite(assignments=[], expression=FcnDef(parameters=[], defaults=[], body=Suite(assignments=[], expression=Num(n=3.14)))))
self.check("{=> 3.14;;}", Suite(assignments=[], expression=FcnDef(parameters=[], defaults=[], body=Suite(assignments=[], expression=Num(n=3.14)))))
self.check("{=> ;3.14}", Suite(assignments=[], expression=FcnDef(parameters=[], defaults=[], body=Suite(assignments=[], expression=Num(n=3.14)))))
self.check("{=> ;;3.14}", Suite(assignments=[], expression=FcnDef(parameters=[], defaults=[], body=Suite(assignments=[], expression=Num(n=3.14)))))
self.check("{=> ;3.14;}", Suite(assignments=[], expression=FcnDef(parameters=[], defaults=[], body=Suite(assignments=[], expression=Num(n=3.14)))))
self.check("{=> ;;3.14;;}", Suite(assignments=[], expression=FcnDef(parameters=[], defaults=[], body=Suite(assignments=[], expression=Num(n=3.14)))))
self.check("def pi(): 3.14; pi()", Suite(assignments=[Assignment(lvalues=[Name(id='pi', ctx=Store())], expression=FcnDef(parameters=[], defaults=[], body=Suite(assignments=[], expression=Num(n=3.14))))], expression=FcnCall(function=Name(id='pi', ctx=Load()), positional=[], names=[], named=[])))
self.check("def pi() {3.14} pi()", Suite(assignments=[Assignment(lvalues=[Name(id='pi', ctx=Store())], expression=FcnDef(parameters=[], defaults=[], body=Suite(assignments=[], expression=Num(n=3.14))))], expression=FcnCall(function=Name(id='pi', ctx=Load()), positional=[], names=[], named=[])))
self.check("x is integer", Suite(assignments=[], expression=TypeCheck(expr=Name(id='x', ctx=Load()), schema=Name(id='integer', ctx=Load()), negate=False)))
self.check("x is not real", Suite(assignments=[], expression=TypeCheck(expr=Name(id='x', ctx=Load()), schema=Name(id='real', ctx=Load()), negate=True)))
def test_normalizeLogic(self):
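        # Behaviour exercised below: normalizeLogic pushes negations inward
        # (De Morgan's laws), flips comparison operators instead of wrapping
        # them in `not` (== <-> !=, > <-> <=, in <-> not in), expands chained
        # comparisons, and, when the second argument is True, negates the
        # whole expression before normalizing.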
self.check("False", normalizeLogic(parse("True"), True), linenumbers=False)
self.check("True", normalizeLogic(parse("False"), True), linenumbers=False)
self.check("not x", normalizeLogic(parse("x"), True), linenumbers=False)
self.check("not x and not y", normalizeLogic(parse("x or y"), True), linenumbers=False)
self.check("not x and not y and not z", normalizeLogic(parse("x or y or z"), True), linenumbers=False)
self.check("not x and not y and not z", normalizeLogic(parse("(x or y) or z"), True), linenumbers=False)
self.check("not x and not y and not z", normalizeLogic(parse("x or (y or z)"), True), linenumbers=False)
self.check("not x or not y", normalizeLogic(parse("x and y"), True), linenumbers=False)
self.check("not x or not y or not z", normalizeLogic(parse("x and y and z"), True), linenumbers=False)
self.check("not x or not y or not z", normalizeLogic(parse("(x and y) and z"), True), linenumbers=False)
self.check("not x or not y or not z", normalizeLogic(parse("x and (y and z)"), True), linenumbers=False)
self.check("(not x or not y) and not z", normalizeLogic(parse("x and y or z"), True), linenumbers=False)
self.check("not x and (not y or not z)", normalizeLogic(parse("x or y and z"), True), linenumbers=False)
self.check("not x(1)", normalizeLogic(parse("x(1)"), True), linenumbers=False)
self.check("not x(1) and not y(2)", normalizeLogic(parse("x(1) or y(2)"), True), linenumbers=False)
self.check("not x(1) and not y(2) and not z(3)", normalizeLogic(parse("x(1) or y(2) or z(3)"), True), linenumbers=False)
self.check("not x(1) and not y(2) and not z(3)", normalizeLogic(parse("(x(1) or y(2)) or z(3)"), True), linenumbers=False)
self.check("not x(1) and not y(2) and not z(3)", normalizeLogic(parse("x(1) or (y(2) or z(3))"), True), linenumbers=False)
self.check("not x(1) or not y(2)", normalizeLogic(parse("x(1) and y(2)"), True), linenumbers=False)
self.check("not x(1) or not y(2) or not z(3)", normalizeLogic(parse("x(1) and y(2) and z(3)"), True), linenumbers=False)
self.check("not x(1) or not y(2) or not z(3)", normalizeLogic(parse("(x(1) and y(2)) and z(3)"), True), linenumbers=False)
self.check("not x(1) or not y(2) or not z(3)", normalizeLogic(parse("x(1) and (y(2) and z(3))"), True), linenumbers=False)
self.check("(not x(1) or not y(2)) and not z(3)", normalizeLogic(parse("x(1) and y(2) or z(3)"), True), linenumbers=False)
self.check("not x(1) and (not y(2) or not z(3))", normalizeLogic(parse("x(1) or y(2) and z(3)"), True), linenumbers=False)
self.check("not x.a", normalizeLogic(parse("x.a"), True), linenumbers=False)
self.check("not x.a and not y.b", normalizeLogic(parse("x.a or y.b"), True), linenumbers=False)
self.check("not x.a and not y.b and not z.c", normalizeLogic(parse("x.a or y.b or z.c"), True), linenumbers=False)
self.check("not x.a and not y.b and not z.c", normalizeLogic(parse("(x.a or y.b) or z.c"), True), linenumbers=False)
self.check("not x.a and not y.b and not z.c", normalizeLogic(parse("x.a or (y.b or z.c)"), True), linenumbers=False)
self.check("not x.a or not y.b", normalizeLogic(parse("x.a and y.b"), True), linenumbers=False)
self.check("not x.a or not y.b or not z.c", normalizeLogic(parse("x.a and y.b and z.c"), True), linenumbers=False)
self.check("not x.a or not y.b or not z.c", normalizeLogic(parse("(x.a and y.b) and z.c"), True), linenumbers=False)
self.check("not x.a or not y.b or not z.c", normalizeLogic(parse("x.a and (y.b and z.c)"), True), linenumbers=False)
self.check("(not x.a or not y.b) and not z.c", normalizeLogic(parse("x.a and y.b or z.c"), True), linenumbers=False)
self.check("not x.a and (not y.b or not z.c)", normalizeLogic(parse("x.a or y.b and z.c"), True), linenumbers=False)
self.check("False", normalizeLogic(parse("not True")), linenumbers=False)
self.check("True", normalizeLogic(parse("not False")), linenumbers=False)
self.check("not x", normalizeLogic(parse("not x")), linenumbers=False)
self.check("not x and not y", normalizeLogic(parse("not (x or y)")), linenumbers=False)
self.check("not x and not y and not z", normalizeLogic(parse("not (x or y or z)")), linenumbers=False)
self.check("not x and not y and not z", normalizeLogic(parse("not ((x or y) or z)")), linenumbers=False)
self.check("not x and not y and not z", normalizeLogic(parse("not (x or (y or z))")), linenumbers=False)
self.check("not x or not y", normalizeLogic(parse("not (x and y)")), linenumbers=False)
self.check("not x or not y or not z", normalizeLogic(parse("not (x and y and z)")), linenumbers=False)
self.check("not x or not y or not z", normalizeLogic(parse("not ((x and y) and z)")), linenumbers=False)
self.check("not x or not y or not z", normalizeLogic(parse("not (x and (y and z))")), linenumbers=False)
self.check("(not x or not y) and not z", normalizeLogic(parse("not (x and y or z)")), linenumbers=False)
self.check("not x and (not y or not z)", normalizeLogic(parse("not (x or y and z)")), linenumbers=False)
self.check("not x(1)", normalizeLogic(parse("not x(1)")), linenumbers=False)
self.check("not x(1) and not y(2)", normalizeLogic(parse("not (x(1) or y(2))")), linenumbers=False)
self.check("not x(1) and not y(2) and not z(3)", normalizeLogic(parse("not (x(1) or y(2) or z(3))")), linenumbers=False)
self.check("not x(1) and not y(2) and not z(3)", normalizeLogic(parse("not ((x(1) or y(2)) or z(3))")), linenumbers=False)
self.check("not x(1) and not y(2) and not z(3)", normalizeLogic(parse("not (x(1) or (y(2) or z(3)))")), linenumbers=False)
self.check("not x(1) or not y(2)", normalizeLogic(parse("not (x(1) and y(2))")), linenumbers=False)
self.check("not x(1) or not y(2) or not z(3)", normalizeLogic(parse("not (x(1) and y(2) and z(3))")), linenumbers=False)
self.check("not x(1) or not y(2) or not z(3)", normalizeLogic(parse("not ((x(1) and y(2)) and z(3))")), linenumbers=False)
self.check("not x(1) or not y(2) or not z(3)", normalizeLogic(parse("not (x(1) and (y(2) and z(3)))")), linenumbers=False)
self.check("(not x(1) or not y(2)) and not z(3)", normalizeLogic(parse("not (x(1) and y(2) or z(3))")), linenumbers=False)
self.check("not x(1) and (not y(2) or not z(3))", normalizeLogic(parse("not (x(1) or y(2) and z(3))")), linenumbers=False)
self.check("not x.a", normalizeLogic(parse("not x.a")), linenumbers=False)
self.check("not x.a and not y.b", normalizeLogic(parse("not (x.a or y.b)")), linenumbers=False)
self.check("not x.a and not y.b and not z.c", normalizeLogic(parse("not (x.a or y.b or z.c)")), linenumbers=False)
self.check("not x.a and not y.b and not z.c", normalizeLogic(parse("not ((x.a or y.b) or z.c)")), linenumbers=False)
self.check("not x.a and not y.b and not z.c", normalizeLogic(parse("not (x.a or (y.b or z.c))")), linenumbers=False)
self.check("not x.a or not y.b", normalizeLogic(parse("not (x.a and y.b)")), linenumbers=False)
self.check("not x.a or not y.b or not z.c", normalizeLogic(parse("not (x.a and y.b and z.c)")), linenumbers=False)
self.check("not x.a or not y.b or not z.c", normalizeLogic(parse("not ((x.a and y.b) and z.c)")), linenumbers=False)
self.check("not x.a or not y.b or not z.c", normalizeLogic(parse("not (x.a and (y.b and z.c))")), linenumbers=False)
self.check("(not x.a or not y.b) and not z.c", normalizeLogic(parse("not (x.a and y.b or z.c)")), linenumbers=False)
self.check("not x.a and (not y.b or not z.c)", normalizeLogic(parse("not (x.a or y.b and z.c)")), linenumbers=False)
self.check("False", normalizeLogic(parse("not not not True")), linenumbers=False)
self.check("True", normalizeLogic(parse("not not not False")), linenumbers=False)
self.check("not x", normalizeLogic(parse("not not not x")), linenumbers=False)
self.check("not x and not y", normalizeLogic(parse("not not not (x or y)")), linenumbers=False)
self.check("not x and not y and not z", normalizeLogic(parse("not not not (x or y or z)")), linenumbers=False)
self.check("not x and not y and not z", normalizeLogic(parse("not not not ((x or y) or z)")), linenumbers=False)
self.check("not x and not y and not z", normalizeLogic(parse("not not not (x or (y or z))")), linenumbers=False)
self.check("not x or not y", normalizeLogic(parse("not not not (x and y)")), linenumbers=False)
self.check("not x or not y or not z", normalizeLogic(parse("not not not (x and y and z)")), linenumbers=False)
self.check("not x or not y or not z", normalizeLogic(parse("not not not ((x and y) and z)")), linenumbers=False)
self.check("not x or not y or not z", normalizeLogic(parse("not not not (x and (y and z))")), linenumbers=False)
self.check("(not x or not y) and not z", normalizeLogic(parse("not not not (x and y or z)")), linenumbers=False)
self.check("not x and (not y or not z)", normalizeLogic(parse("not not not (x or y and z)")), linenumbers=False)
self.check("not x(1)", normalizeLogic(parse("not not not x(1)")), linenumbers=False)
self.check("not x(1) and not y(2)", normalizeLogic(parse("not not not (x(1) or y(2))")), linenumbers=False)
self.check("not x(1) and not y(2) and not z(3)", normalizeLogic(parse("not not not (x(1) or y(2) or z(3))")), linenumbers=False)
self.check("not x(1) and not y(2) and not z(3)", normalizeLogic(parse("not not not ((x(1) or y(2)) or z(3))")), linenumbers=False)
self.check("not x(1) and not y(2) and not z(3)", normalizeLogic(parse("not not not (x(1) or (y(2) or z(3)))")), linenumbers=False)
self.check("not x(1) or not y(2)", normalizeLogic(parse("not not not (x(1) and y(2))")), linenumbers=False)
self.check("not x(1) or not y(2) or not z(3)", normalizeLogic(parse("not not not (x(1) and y(2) and z(3))")), linenumbers=False)
self.check("not x(1) or not y(2) or not z(3)", normalizeLogic(parse("not not not ((x(1) and y(2)) and z(3))")), linenumbers=False)
self.check("not x(1) or not y(2) or not z(3)", normalizeLogic(parse("not not not (x(1) and (y(2) and z(3)))")), linenumbers=False)
self.check("(not x(1) or not y(2)) and not z(3)", normalizeLogic(parse("not not not (x(1) and y(2) or z(3))")), linenumbers=False)
self.check("not x(1) and (not y(2) or not z(3))", normalizeLogic(parse("not not not (x(1) or y(2) and z(3))")), linenumbers=False)
self.check("not x.a", normalizeLogic(parse("not not not x.a")), linenumbers=False)
self.check("not x.a and not y.b", normalizeLogic(parse("not not not (x.a or y.b)")), linenumbers=False)
self.check("not x.a and not y.b and not z.c", normalizeLogic(parse("not not not (x.a or y.b or z.c)")), linenumbers=False)
self.check("not x.a and not y.b and not z.c", normalizeLogic(parse("not not not ((x.a or y.b) or z.c)")), linenumbers=False)
self.check("not x.a and not y.b and not z.c", normalizeLogic(parse("not not not (x.a or (y.b or z.c))")), linenumbers=False)
self.check("not x.a or not y.b", normalizeLogic(parse("not not not (x.a and y.b)")), linenumbers=False)
self.check("not x.a or not y.b or not z.c", normalizeLogic(parse("not not not (x.a and y.b and z.c)")), linenumbers=False)
self.check("not x.a or not y.b or not z.c", normalizeLogic(parse("not not not ((x.a and y.b) and z.c)")), linenumbers=False)
self.check("not x.a or not y.b or not z.c", normalizeLogic(parse("not not not (x.a and (y.b and z.c))")), linenumbers=False)
self.check("(not x.a or not y.b) and not z.c", normalizeLogic(parse("not not not (x.a and y.b or z.c)")), linenumbers=False)
self.check("not x.a and (not y.b or not z.c)", normalizeLogic(parse("not not not (x.a or y.b and z.c)")), linenumbers=False)
self.check("x != y", normalizeLogic(parse("not x == y")), linenumbers=False)
self.check("x == y", normalizeLogic(parse("not x != y")), linenumbers=False)
self.check("x <= y", normalizeLogic(parse("not x > y")), linenumbers=False)
self.check("x > y", normalizeLogic(parse("not x <= y")), linenumbers=False)
self.check("x >= y", normalizeLogic(parse("not x < y")), linenumbers=False)
self.check("x < y", normalizeLogic(parse("not x >= y")), linenumbers=False)
self.check("x not in y", normalizeLogic(parse("not x in y")), linenumbers=False)
self.check("x in y", normalizeLogic(parse("not x not in y")), linenumbers=False)
self.check("x == y and y == z", normalizeLogic(parse("x == y and y == z")), linenumbers=False)
self.check("x == y and y == z", normalizeLogic(parse("x == y == z")), linenumbers=False)
self.check("x == y or y == z", normalizeLogic(parse("x == y or y == z")), linenumbers=False)
self.check("x != y or y != z", normalizeLogic(parse("not (x == y and y == z)")), linenumbers=False)
self.check("x != y or y != z", normalizeLogic(parse("not (x == y == z)")), linenumbers=False)
self.check("x != y or y == z", normalizeLogic(parse("not (x == y and not y == z)")), linenumbers=False)
self.check("a and b and c and d and e", normalizeLogic(parse("a and (b and (c and (d and e)))")), linenumbers=False)
self.check("a and b and c and d and e", normalizeLogic(parse("(((a and b) and c) and d) and e")), linenumbers=False)
| apache-2.0 | -5,485,388,087,580,594,000 | 90.039953 | 518 | 0.569151 | false |
ceibal-tatu/sugar-toolkit-gtk3 | src/sugar3/activity/widgets.py | 1 | 13471 | # Copyright (C) 2009, Aleksey Lim, Simon Schampijer
# Copyright (C) 2012, Walter Bender
# Copyright (C) 2012, One Laptop Per Child
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the
# Free Software Foundation, Inc., 59 Temple Place - Suite 330,
# Boston, MA 02111-1307, USA.
from gi.repository import Gdk
from gi.repository import Gtk
import gettext
from gi.repository import GConf
import logging
from sugar3.graphics.toolbutton import ToolButton
from sugar3.graphics.toolbarbox import ToolbarButton
from sugar3.graphics.radiopalette import RadioPalette, RadioMenuButton
from sugar3.graphics.radiotoolbutton import RadioToolButton
from sugar3.graphics.toolbox import Toolbox
from sugar3.graphics.xocolor import XoColor
from sugar3.graphics.icon import Icon
from sugar3.bundle.activitybundle import ActivityBundle
from sugar3.graphics import style
from sugar3.graphics.palettemenu import PaletteMenuBox
_ = lambda msg: gettext.dgettext('sugar-toolkit', msg)
def _create_activity_icon(metadata):
if metadata is not None and metadata.get('icon-color'):
color = XoColor(metadata['icon-color'])
else:
client = GConf.Client.get_default()
color = XoColor(client.get_string('/desktop/sugar/user/color'))
from sugar3.activity.activity import get_bundle_path
bundle = ActivityBundle(get_bundle_path())
icon = Icon(file=bundle.get_icon(), xo_color=color)
return icon
class ActivityButton(ToolButton):
def __init__(self, activity, **kwargs):
ToolButton.__init__(self, **kwargs)
icon = _create_activity_icon(activity.metadata)
self.set_icon_widget(icon)
icon.show()
self.props.hide_tooltip_on_click = False
self.palette_invoker.props.toggle_palette = True
self.props.tooltip = activity.metadata['title']
activity.metadata.connect('updated', self.__jobject_updated_cb)
def __jobject_updated_cb(self, jobject):
self.props.tooltip = jobject['title']
class ActivityToolbarButton(ToolbarButton):
def __init__(self, activity, **kwargs):
toolbar = ActivityToolbar(activity, orientation_left=True)
ToolbarButton.__init__(self, page=toolbar, **kwargs)
icon = _create_activity_icon(activity.metadata)
self.set_icon_widget(icon)
icon.show()
class StopButton(ToolButton):
def __init__(self, activity, **kwargs):
ToolButton.__init__(self, 'activity-stop', **kwargs)
self.props.tooltip = _('Stop')
self.props.accelerator = '<Ctrl>Q'
self.connect('clicked', self.__stop_button_clicked_cb, activity)
def __stop_button_clicked_cb(self, button, activity):
activity.close()
class UndoButton(ToolButton):
def __init__(self, **kwargs):
ToolButton.__init__(self, 'edit-undo', **kwargs)
self.props.tooltip = _('Undo')
self.props.accelerator = '<Ctrl>Z'
class RedoButton(ToolButton):
def __init__(self, **kwargs):
ToolButton.__init__(self, 'edit-redo', **kwargs)
self.props.tooltip = _('Redo')
class CopyButton(ToolButton):
def __init__(self, **kwargs):
ToolButton.__init__(self, 'edit-copy', **kwargs)
self.props.tooltip = _('Copy')
self.props.accelerator = '<Ctrl>C'
class PasteButton(ToolButton):
def __init__(self, **kwargs):
ToolButton.__init__(self, 'edit-paste', **kwargs)
self.props.tooltip = _('Paste')
self.props.accelerator = '<Ctrl>V'
class ShareButton(RadioMenuButton):
def __init__(self, activity, **kwargs):
palette = RadioPalette()
self.private = RadioToolButton(
icon_name='zoom-home')
palette.append(self.private, _('Private'))
self.neighborhood = RadioToolButton(
icon_name='zoom-neighborhood',
group=self.private)
self._neighborhood_handle = self.neighborhood.connect(
'clicked', self.__neighborhood_clicked_cb, activity)
palette.append(self.neighborhood, _('My Neighborhood'))
activity.connect('shared', self.__update_share_cb)
activity.connect('joined', self.__update_share_cb)
RadioMenuButton.__init__(self, **kwargs)
self.props.palette = palette
if activity.max_participants == 1:
self.props.sensitive = False
def __neighborhood_clicked_cb(self, button, activity):
activity.share()
def __update_share_cb(self, activity):
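        # Block the neighborhood 'clicked' handler while syncing the radio
        # buttons, so setting props.active here does not call
        # activity.share() a second time.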
self.neighborhood.handler_block(self._neighborhood_handle)
try:
if activity.shared_activity is not None and \
not activity.shared_activity.props.private:
self.private.props.sensitive = False
self.neighborhood.props.sensitive = False
self.neighborhood.props.active = True
else:
self.private.props.sensitive = True
self.neighborhood.props.sensitive = True
self.private.props.active = True
finally:
self.neighborhood.handler_unblock(self._neighborhood_handle)
class TitleEntry(Gtk.ToolItem):
def __init__(self, activity, **kwargs):
Gtk.ToolItem.__init__(self)
self.set_expand(False)
self.entry = Gtk.Entry(**kwargs)
self.entry.set_size_request(int(Gdk.Screen.width() / 3), -1)
self.entry.set_text(activity.metadata['title'])
self.entry.connect('focus-out-event', self.__title_changed_cb, activity)
self.entry.connect('button-press-event', self.__button_press_event_cb)
self.entry.show()
self.add(self.entry)
activity.metadata.connect('updated', self.__jobject_updated_cb)
activity.connect('_closing', self.__closing_cb)
def modify_bg(self, state, color):
Gtk.ToolItem.modify_bg(self, state, color)
self.entry.modify_bg(state, color)
def __jobject_updated_cb(self, jobject):
if self.entry.has_focus():
return
if self.entry.get_text() == jobject['title']:
return
self.entry.set_text(jobject['title'])
def __closing_cb(self, activity):
self.save_title(activity)
return False
def __title_changed_cb(self, editable, event, activity):
self.save_title(activity)
return False
def __button_press_event_cb(self, widget, event):
if widget.is_focus():
return False
else:
widget.grab_focus()
widget.select_region(0, -1)
return True
def save_title(self, activity):
title = self.entry.get_text()
if title == activity.metadata['title']:
return
activity.metadata['title'] = title
activity.metadata['title_set_by_user'] = '1'
activity.save()
activity.set_title(title)
shared_activity = activity.get_shared_activity()
if shared_activity is not None:
shared_activity.props.name = title
class DescriptionItem(ToolButton):
def __init__(self, activity, **kwargs):
ToolButton.__init__(self, 'edit-description', **kwargs)
self.set_tooltip(_('Description'))
self.palette_invoker.props.toggle_palette = True
self.palette_invoker.props.lock_palette = True
self.props.hide_tooltip_on_click = False
self._palette = self.get_palette()
description_box = PaletteMenuBox()
sw = Gtk.ScrolledWindow()
sw.set_size_request(int(Gdk.Screen.width() / 2),
2 * style.GRID_CELL_SIZE)
sw.set_policy(Gtk.PolicyType.AUTOMATIC, Gtk.PolicyType.AUTOMATIC)
self._text_view = Gtk.TextView()
self._text_view.set_left_margin(style.DEFAULT_PADDING)
self._text_view.set_right_margin(style.DEFAULT_PADDING)
self._text_view.set_wrap_mode(Gtk.WrapMode.WORD_CHAR)
text_buffer = Gtk.TextBuffer()
if 'description' in activity.metadata:
text_buffer.set_text(activity.metadata['description'])
self._text_view.set_buffer(text_buffer)
self._text_view.connect('focus-out-event',
self.__description_changed_cb, activity)
sw.add(self._text_view)
description_box.append_item(sw, vertical_padding=0)
self._palette.set_content(description_box)
description_box.show_all()
activity.metadata.connect('updated', self.__jobject_updated_cb)
def set_expanded(self, expanded):
box = self.toolbar_box
if not box:
return
if not expanded:
self.palette_invoker.notify_popdown()
return
if box.expanded_button is not None:
box.expanded_button.queue_draw()
if box.expanded_button != self:
box.expanded_button.set_expanded(False)
box.expanded_button = self
def get_toolbar_box(self):
parent = self.get_parent()
if not hasattr(parent, 'owner'):
return None
return parent.owner
toolbar_box = property(get_toolbar_box)
def _get_text_from_buffer(self):
buf = self._text_view.get_buffer()
start_iter = buf.get_start_iter()
end_iter = buf.get_end_iter()
return buf.get_text(start_iter, end_iter, False)
def __jobject_updated_cb(self, jobject):
if self._text_view.has_focus():
return
if 'description' not in jobject:
return
if self._get_text_from_buffer() == jobject['description']:
return
buf = self._text_view.get_buffer()
buf.set_text(jobject['description'])
def __description_changed_cb(self, widget, event, activity):
description = self._get_text_from_buffer()
if 'description' in activity.metadata and \
description == activity.metadata['description']:
return
activity.metadata['description'] = description
activity.save()
return False
class ActivityToolbar(Gtk.Toolbar):
"""The Activity toolbar with the Journal entry title and sharing button"""
def __init__(self, activity, orientation_left=False):
Gtk.Toolbar.__init__(self)
self._activity = activity
if activity.metadata:
title_button = TitleEntry(activity)
title_button.show()
self.insert(title_button, -1)
self.title = title_button.entry
        if not orientation_left:
separator = Gtk.SeparatorToolItem()
separator.props.draw = False
separator.set_expand(True)
self.insert(separator, -1)
separator.show()
if activity.metadata:
description_item = DescriptionItem(activity)
description_item.show()
self.insert(description_item, -1)
self.share = ShareButton(activity)
self.share.show()
self.insert(self.share, -1)
class EditToolbar(Gtk.Toolbar):
"""Provides the standard edit toolbar for Activities.
Members:
undo -- the undo button
redo -- the redo button
copy -- the copy button
paste -- the paste button
separator -- A separator between undo/redo and copy/paste
    This class only provides the 'edit' buttons in a standard layout;
    your activity will need to hide the buttons which make no sense for your
    Activity, and to connect the button events to your own callbacks:
## Example from Read.activity:
# Create the edit toolbar:
self._edit_toolbar = EditToolbar(self._view)
# Hide undo and redo, they're not needed
self._edit_toolbar.undo.props.visible = False
self._edit_toolbar.redo.props.visible = False
# Hide the separator too:
self._edit_toolbar.separator.props.visible = False
# As long as nothing is selected, copy needs to be insensitive:
self._edit_toolbar.copy.set_sensitive(False)
# When the user clicks the button, call _edit_toolbar_copy_cb()
self._edit_toolbar.copy.connect('clicked', self._edit_toolbar_copy_cb)
# Add the edit toolbar:
toolbox.add_toolbar(_('Edit'), self._edit_toolbar)
# And make it visible:
self._edit_toolbar.show()
"""
def __init__(self):
Gtk.Toolbar.__init__(self)
self.undo = UndoButton()
self.insert(self.undo, -1)
self.undo.show()
self.redo = RedoButton()
self.insert(self.redo, -1)
self.redo.show()
self.separator = Gtk.SeparatorToolItem()
self.separator.set_draw(True)
self.insert(self.separator, -1)
self.separator.show()
self.copy = CopyButton()
self.insert(self.copy, -1)
self.copy.show()
self.paste = PasteButton()
self.insert(self.paste, -1)
self.paste.show()
| lgpl-2.1 | -841,100,998,823,937,800 | 32.6775 | 80 | 0.630317 | false |
neavouli/yournextrepresentative | candidates/tests/test_merge_people.py | 3 | 12037 | from __future__ import unicode_literals
from django.test import TestCase
from ..models import merge_popit_people
class TestMergePeople(TestCase):
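    # merge_popit_people(primary, secondary), as exercised below: primary wins
    # on conflicting scalar values, list fields are unioned, a conflicting
    # 'name' is preserved under 'other_names', and 'versions' are concatenated.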
def test_merge_basic_unknown_details(self):
primary = {
'foo': 'bar',
'quux': 'xyzzy',
}
secondary = {
'foo': 'baz',
'hello': 'goodbye',
}
merged = merge_popit_people(primary, secondary)
self.assertEqual(
merged,
{
'foo': 'bar',
'quux': 'xyzzy',
'hello': 'goodbye',
}
)
def test_merge_arrays(self):
primary = {
'some-list': ['a', 'b', 'c'],
}
secondary = {
'some-list': ['b', 'c', 'd'],
}
merged = merge_popit_people(primary, secondary)
self.assertEqual(
merged,
{
'some-list': ['a', 'b', 'c', 'd'],
}
)
def test_merge_array_primary_null(self):
primary = {
'some-list': None,
}
secondary = {
'some-list': ['a', 'b', 'c'],
}
merged = merge_popit_people(primary, secondary)
self.assertEqual(
merged,
{
'some-list': ['a', 'b', 'c'],
}
)
def test_merge_array_secondary_null(self):
primary = {
'some-list': ['a', 'b', 'c'],
}
secondary = {
'some-list': None,
}
merged = merge_popit_people(primary, secondary)
self.assertEqual(
merged,
{
'some-list': ['a', 'b', 'c'],
}
)
def test_merge_standing_in_contradicting(self):
primary = {
'standing_in': {
'2010': {
'name': 'Edinburgh East',
'post_id': '14419',
'mapit_url': 'http://mapit.mysociety.org/area/14419',
},
'2015': {
'name': 'Edinburgh North and Leith',
'post_id': '14420',
'mapit_url': 'http://mapit.mysociety.org/area/14420',
'elected': True,
},
}
}
secondary = {
'standing_in': {
'2010': {
'name': 'Aberdeen South',
'post_id': '14399',
'mapit_url': 'http://mapit.mysociety.org/area/14399',
},
'2015': {
'name': 'Aberdeen North',
'post_id': '14398',
'mapit_url': 'http://mapit.mysociety.org/area/14398',
},
},
}
merged = merge_popit_people(primary, secondary)
self.assertEqual(
merged,
{
'standing_in': {
'2010': {
'name': 'Edinburgh East',
'post_id': '14419',
'mapit_url': 'http://mapit.mysociety.org/area/14419',
},
'2015': {
'name': 'Edinburgh North and Leith',
'post_id': '14420',
'mapit_url': 'http://mapit.mysociety.org/area/14420',
'elected': True,
},
}
}
)
def test_merge_standing_in_2015_null_in_primary(self):
primary = {
'standing_in': {
'2010': {
'name': 'Edinburgh East',
'post_id': '14419',
'mapit_url': 'http://mapit.mysociety.org/area/14419',
},
'2015': None,
}
}
secondary = {
'standing_in': {
'2010': {
'name': 'Aberdeen South',
'post_id': '14399',
'mapit_url': 'http://mapit.mysociety.org/area/14399',
},
'2015': {
'name': 'Aberdeen North',
'post_id': '14398',
'mapit_url': 'http://mapit.mysociety.org/area/14398',
'elected': False,
},
},
}
merged = merge_popit_people(primary, secondary)
self.assertEqual(
merged,
{
'standing_in': {
'2010': {
'name': 'Edinburgh East',
'post_id': '14419',
'mapit_url': 'http://mapit.mysociety.org/area/14419',
},
'2015': {
'name': 'Aberdeen North',
'post_id': '14398',
'mapit_url': 'http://mapit.mysociety.org/area/14398',
'elected': False,
},
}
}
)
def test_merge_standing_in_2015_null_in_secondary(self):
primary = {
'standing_in': {
'2010': {
'name': 'Edinburgh East',
'post_id': '14419',
'mapit_url': 'http://mapit.mysociety.org/area/14419',
},
'2015': {
'name': 'Edinburgh North and Leith',
'post_id': '14420',
'mapit_url': 'http://mapit.mysociety.org/area/14420',
},
}
}
secondary = {
'standing_in': {
'2010': {
'name': 'Aberdeen South',
'post_id': '14399',
'mapit_url': 'http://mapit.mysociety.org/area/14399',
},
'2015': None
},
}
merged = merge_popit_people(primary, secondary)
self.assertEqual(
merged,
{
'standing_in': {
'2010': {
'name': 'Edinburgh East',
'post_id': '14419',
'mapit_url': 'http://mapit.mysociety.org/area/14419',
},
'2015': {
'name': 'Edinburgh North and Leith',
'post_id': '14420',
'mapit_url': 'http://mapit.mysociety.org/area/14420',
},
}
}
)
def test_merge_conflicting_names(self):
primary = {
'name': 'Dave Cameron',
}
secondary = {
'name': 'David Cameron',
}
merged = merge_popit_people(primary, secondary)
self.assertEqual(
merged,
{
'name': 'Dave Cameron',
'other_names': [
{'name': 'David Cameron'}
]
}
)
def test_fuller_merge_example(self):
primary = {
"name": "Julian Huppert",
"other_names": [
{
"end_date": None,
"id": "54b3fadc1f10dde30b97b3c4",
"name": "Julian Leon Huppert",
"note": "His full name, including the middle name ",
"start_date": None
}
],
"party_ppc_page_url": "http://www.libdems.org.uk/julian_huppert",
"proxy_image": "http://candidates-posts.127.0.0.1.xip.io:3000/image-proxy//http%3A%2F%2Fyournextmp.popit.mysociety.org%2Fpersons%2F47%2Fimage%2F5481e8e0b150e238702c060d",
"twitter_username": "JulianHuppert",
"wikipedia_url": "https://en.wikipedia.org/wiki/Julian_Huppert"
}
secondary = {
"name": "Julian Huppert As Well",
"other_names": [],
"party_ppc_page_url": "",
"proxy_image": None,
"twitter_username": "",
"wikipedia_url": ""
}
expected_result = {
"name": "Julian Huppert",
"other_names": [
{
"end_date": None,
"id": "54b3fadc1f10dde30b97b3c4",
"name": "Julian Leon Huppert",
"note": "His full name, including the middle name ",
"start_date": None
},
{
'name': 'Julian Huppert As Well',
},
],
"party_ppc_page_url": "http://www.libdems.org.uk/julian_huppert",
"proxy_image": "http://candidates-posts.127.0.0.1.xip.io:3000/image-proxy//http%3A%2F%2Fyournextmp.popit.mysociety.org%2Fpersons%2F47%2Fimage%2F5481e8e0b150e238702c060d",
"twitter_username": "JulianHuppert",
"wikipedia_url": "https://en.wikipedia.org/wiki/Julian_Huppert"
}
merged = merge_popit_people(primary, secondary)
self.assertEqual(
merged,
expected_result
)
def test_merge_conflicting_names_previous_other_names(self):
primary = {
'name': 'Dave Cameron',
'other_names': [
{'name': 'David W D Cameron'}
]
}
secondary = {
'name': 'David Cameron',
'other_names': [
{'name': 'David William Donald Cameron'}
]
}
merged = merge_popit_people(primary, secondary)
self.assertEqual(
set(merged.keys()),
set(['name', 'other_names'])
)
self.assertEqual(merged['name'], 'Dave Cameron')
sorted_other_names = sorted(
merged['other_names'],
key=lambda e: e['name']
)
self.assertEqual(
sorted_other_names,
[
{'name': 'David Cameron'},
{'name': 'David W D Cameron'},
{'name': 'David William Donald Cameron'},
],
)
def test_merge_versions(self):
primary = {
'name': 'Dave Cameron',
'versions': [
{
"version_id": "12fdb2d20e9e0753",
"information_source": "Some random update",
},
{
"version_id": "3570e9e02d2bdf21",
"information_source": "Original import",
},
]
}
secondary = {
'name': 'David Cameron',
'versions': [
{
"version_id": "b6fafb50a424b012",
"information_source": "Creation of a duplicate",
},
]
}
merged = merge_popit_people(primary, secondary)
self.assertEqual(
merged,
{
'name': 'Dave Cameron',
'other_names': [
{'name': 'David Cameron'}
],
'versions': [
{
"version_id": "12fdb2d20e9e0753",
"information_source": "Some random update",
},
{
"version_id": "3570e9e02d2bdf21",
"information_source": "Original import",
},
{
"version_id": "b6fafb50a424b012",
"information_source": "Creation of a duplicate",
},
]
}
)
| agpl-3.0 | -4,818,878,524,292,432,000 | 31.184492 | 182 | 0.390047 | false |
alphaBenj/zipline | zipline/utils/sqlite_utils.py | 2 | 1739 | # Copyright 2016 Quantopian, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from functools import partial
import os
import sqlite3
import sqlalchemy as sa
from six.moves import range
from .input_validation import coerce_string
SQLITE_MAX_VARIABLE_NUMBER = 998
def group_into_chunks(items, chunk_size=SQLITE_MAX_VARIABLE_NUMBER):
items = list(items)
return [items[x:x+chunk_size]
for x in range(0, len(items), chunk_size)]
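# Example (illustrative, not from this module): querying ids one chunk at a
# time so each "IN (?, ?, ...)" clause stays under SQLite's bound-parameter
# limit; `conn`, `ids` and the query are placeholders:
#
#     for chunk in group_into_chunks(ids):
#         placeholders = ','.join('?' * len(chunk))
#         rows += conn.execute(query % placeholders, chunk).fetchall()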
def verify_sqlite_path_exists(path):
if path != ':memory:' and not os.path.exists(path):
raise ValueError("SQLite file {!r} doesn't exist.".format(path))
def check_and_create_connection(path, require_exists):
if require_exists:
verify_sqlite_path_exists(path)
return sqlite3.connect(path)
def check_and_create_engine(path, require_exists):
if require_exists:
verify_sqlite_path_exists(path)
return sa.create_engine('sqlite:///' + path)
def coerce_string_to_conn(require_exists):
return coerce_string(
partial(check_and_create_connection, require_exists=require_exists)
)
def coerce_string_to_eng(require_exists):
return coerce_string(
partial(check_and_create_engine, require_exists=require_exists)
)
| apache-2.0 | -4,548,816,480,827,893,000 | 28.474576 | 75 | 0.723404 | false |
larryhynes/qutebrowser | qutebrowser/test/utils/usertypes/test_enum.py | 2 | 1944 | # vim: ft=python fileencoding=utf-8 sts=4 sw=4 et:
# Copyright 2014-2015 Florian Bruhin (The Compiler) <[email protected]>
#
# This file is part of qutebrowser.
#
# qutebrowser is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# qutebrowser is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with qutebrowser. If not, see <http://www.gnu.org/licenses/>.
"""Tests for the Enum class."""
import unittest
from qutebrowser.utils import usertypes
# FIXME: Add some more tests, e.g. for is_int
class EnumTests(unittest.TestCase):
"""Test simple enums.
Attributes:
enum: The enum we're testing.
"""
def setUp(self):
self.enum = usertypes.enum('Enum', ['one', 'two'])
def test_values(self):
"""Test if enum members resolve to the right values."""
self.assertEqual(self.enum.one.value, 1)
self.assertEqual(self.enum.two.value, 2)
def test_name(self):
"""Test .name mapping."""
self.assertEqual(self.enum.one.name, 'one')
self.assertEqual(self.enum.two.name, 'two')
def test_unknown(self):
"""Test invalid values which should raise an AttributeError."""
with self.assertRaises(AttributeError):
_ = self.enum.three
def test_start(self):
"""Test the start= argument."""
e = usertypes.enum('Enum', ['three', 'four'], start=3)
self.assertEqual(e.three.value, 3)
self.assertEqual(e.four.value, 4)
if __name__ == '__main__':
unittest.main()
| gpl-3.0 | 5,436,881,742,198,679,000 | 29.857143 | 74 | 0.670267 | false |
rupertsmall/DIGITS | digits/dataset/images/classification/views.py | 7 | 10610 | # Copyright (c) 2014-2015, NVIDIA CORPORATION. All rights reserved.
import os
import flask
from digits import utils
from digits.utils.routing import request_wants_json, job_from_request
from digits.webapp import app, scheduler, autodoc
from digits.dataset import tasks
from forms import ImageClassificationDatasetForm
from job import ImageClassificationDatasetJob
NAMESPACE = '/datasets/images/classification'
def from_folders(job, form):
"""
Add tasks for creating a dataset by parsing folders of images
"""
job.labels_file = utils.constants.LABELS_FILE
### Add ParseFolderTask
percent_val = form.folder_pct_val.data
val_parents = []
if form.has_val_folder.data:
percent_val = 0
percent_test = form.folder_pct_test.data
test_parents = []
if form.has_test_folder.data:
percent_test = 0
min_per_class = form.folder_train_min_per_class.data
max_per_class = form.folder_train_max_per_class.data
parse_train_task = tasks.ParseFolderTask(
job_dir = job.dir(),
folder = form.folder_train.data,
percent_val = percent_val,
percent_test = percent_test,
min_per_category = min_per_class if min_per_class>0 else 1,
max_per_category = max_per_class if max_per_class>0 else None
)
job.tasks.append(parse_train_task)
# set parents
if not form.has_val_folder.data:
val_parents = [parse_train_task]
if not form.has_test_folder.data:
test_parents = [parse_train_task]
if form.has_val_folder.data:
min_per_class = form.folder_val_min_per_class.data
max_per_class = form.folder_val_max_per_class.data
parse_val_task = tasks.ParseFolderTask(
job_dir = job.dir(),
parents = parse_train_task,
folder = form.folder_val.data,
percent_val = 100,
percent_test = 0,
min_per_category = min_per_class if min_per_class>0 else 1,
max_per_category = max_per_class if max_per_class>0 else None
)
job.tasks.append(parse_val_task)
val_parents = [parse_val_task]
if form.has_test_folder.data:
min_per_class = form.folder_test_min_per_class.data
max_per_class = form.folder_test_max_per_class.data
parse_test_task = tasks.ParseFolderTask(
job_dir = job.dir(),
parents = parse_train_task,
folder = form.folder_test.data,
percent_val = 0,
percent_test = 100,
min_per_category = min_per_class if min_per_class>0 else 1,
max_per_category = max_per_class if max_per_class>0 else None
)
job.tasks.append(parse_test_task)
test_parents = [parse_test_task]
### Add CreateDbTasks
encoding = form.encoding.data
job.tasks.append(
tasks.CreateDbTask(
job_dir = job.dir(),
parents = parse_train_task,
input_file = utils.constants.TRAIN_FILE,
db_name = utils.constants.TRAIN_DB,
image_dims = job.image_dims,
resize_mode = job.resize_mode,
encoding = encoding,
mean_file = utils.constants.MEAN_FILE_CAFFE,
labels_file = job.labels_file,
)
)
if percent_val > 0 or form.has_val_folder.data:
job.tasks.append(
tasks.CreateDbTask(
job_dir = job.dir(),
parents = val_parents,
input_file = utils.constants.VAL_FILE,
db_name = utils.constants.VAL_DB,
image_dims = job.image_dims,
resize_mode = job.resize_mode,
encoding = encoding,
labels_file = job.labels_file,
)
)
if percent_test > 0 or form.has_test_folder.data:
job.tasks.append(
tasks.CreateDbTask(
job_dir = job.dir(),
parents = test_parents,
input_file = utils.constants.TEST_FILE,
db_name = utils.constants.TEST_DB,
image_dims = job.image_dims,
resize_mode = job.resize_mode,
encoding = encoding,
labels_file = job.labels_file,
)
)
def from_files(job, form):
"""
Add tasks for creating a dataset by reading textfiles
"""
### labels
if form.textfile_use_local_files.data:
job.labels_file = form.textfile_local_labels_file.data.strip()
else:
flask.request.files[form.textfile_labels_file.name].save(
os.path.join(job.dir(), utils.constants.LABELS_FILE)
)
job.labels_file = utils.constants.LABELS_FILE
encoding = form.encoding.data
shuffle = bool(form.textfile_shuffle.data)
### train
if form.textfile_use_local_files.data:
train_file = form.textfile_local_train_images.data.strip()
else:
flask.request.files[form.textfile_train_images.name].save(
os.path.join(job.dir(), utils.constants.TRAIN_FILE)
)
train_file = utils.constants.TRAIN_FILE
image_folder = form.textfile_train_folder.data.strip()
if not image_folder:
image_folder = None
job.tasks.append(
tasks.CreateDbTask(
job_dir = job.dir(),
input_file = train_file,
db_name = utils.constants.TRAIN_DB,
image_dims = job.image_dims,
image_folder= image_folder,
resize_mode = job.resize_mode,
encoding = encoding,
mean_file = utils.constants.MEAN_FILE_CAFFE,
labels_file = job.labels_file,
shuffle = shuffle,
)
)
### val
if form.textfile_use_val.data:
if form.textfile_use_local_files.data:
val_file = form.textfile_local_val_images.data.strip()
else:
flask.request.files[form.textfile_val_images.name].save(
os.path.join(job.dir(), utils.constants.VAL_FILE)
)
val_file = utils.constants.VAL_FILE
image_folder = form.textfile_val_folder.data.strip()
if not image_folder:
image_folder = None
job.tasks.append(
tasks.CreateDbTask(
job_dir = job.dir(),
input_file = val_file,
db_name = utils.constants.VAL_DB,
image_dims = job.image_dims,
image_folder= image_folder,
resize_mode = job.resize_mode,
encoding = encoding,
labels_file = job.labels_file,
shuffle = shuffle,
)
)
### test
if form.textfile_use_test.data:
if form.textfile_use_local_files.data:
test_file = form.textfile_local_test_images.data.strip()
else:
flask.request.files[form.textfile_test_images.name].save(
os.path.join(job.dir(), utils.constants.TEST_FILE)
)
test_file = utils.constants.TEST_FILE
image_folder = form.textfile_test_folder.data.strip()
if not image_folder:
image_folder = None
job.tasks.append(
tasks.CreateDbTask(
job_dir = job.dir(),
input_file = test_file,
db_name = utils.constants.TEST_DB,
image_dims = job.image_dims,
image_folder= image_folder,
resize_mode = job.resize_mode,
encoding = encoding,
labels_file = job.labels_file,
shuffle = shuffle,
)
)
@app.route(NAMESPACE + '/new', methods=['GET'])
@autodoc('datasets')
def image_classification_dataset_new():
"""
Returns a form for a new ImageClassificationDatasetJob
"""
form = ImageClassificationDatasetForm()
return flask.render_template('datasets/images/classification/new.html', form=form)
@app.route(NAMESPACE + '.json', methods=['POST'])
@app.route(NAMESPACE, methods=['POST'])
@autodoc(['datasets', 'api'])
def image_classification_dataset_create():
"""
Creates a new ImageClassificationDatasetJob
Returns JSON when requested: {job_id,name,status} or {errors:[]}
"""
form = ImageClassificationDatasetForm()
if not form.validate_on_submit():
if request_wants_json():
return flask.jsonify({'errors': form.errors}), 400
else:
return flask.render_template('datasets/images/classification/new.html', form=form), 400
job = None
try:
job = ImageClassificationDatasetJob(
name = form.dataset_name.data,
image_dims = (
int(form.resize_height.data),
int(form.resize_width.data),
int(form.resize_channels.data),
),
resize_mode = form.resize_mode.data
)
if form.method.data == 'folder':
from_folders(job, form)
elif form.method.data == 'textfile':
from_files(job, form)
else:
raise ValueError('method not supported')
scheduler.add_job(job)
if request_wants_json():
return flask.jsonify(job.json_dict())
else:
return flask.redirect(flask.url_for('datasets_show', job_id=job.id()))
except:
if job:
scheduler.delete_job(job)
raise
def show(job):
"""
Called from digits.dataset.views.datasets_show()
"""
return flask.render_template('datasets/images/classification/show.html', job=job)
@app.route(NAMESPACE + '/summary', methods=['GET'])
@autodoc('datasets')
def image_classification_dataset_summary():
"""
Return a short HTML summary of a DatasetJob
"""
job = job_from_request()
return flask.render_template('datasets/images/classification/summary.html', dataset=job)
| bsd-3-clause | 4,793,222,573,061,883,000 | 33.673203 | 99 | 0.542036 | false |
dmazzer/nors | remote/GrovePi/Software/Python/grove_ph_sensor.py | 2 | 2094 | #!/usr/bin/env python
#
# GrovePi Example for using the Grove PH Sensor (http://www.seeedstudio.com/wiki/Grove_-_PH_Sensor)
#
# The GrovePi connects the Raspberry Pi and Grove sensors. You can learn more about GrovePi here: http://www.dexterindustries.com/GrovePi
#
# Have a question about this example? Ask on the forums here: http://www.dexterindustries.com/forum/?forum=grovepi
#
'''
## License
The MIT License (MIT)
GrovePi for the Raspberry Pi: an open source platform for connecting Grove Sensors to the Raspberry Pi.
Copyright (C) 2015 Dexter Industries
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
'''
import time
import grovepi
# Connect the Grove PH Sensor to analog port A0
# SIG,NC,VCC,GND
sensor = 0
grovepi.pinMode(sensor,"INPUT")
time.sleep(1)
# Reference voltage of ADC is 5v
adc_ref = 5
while True:
try:
# Read sensor value
sensor_value = grovepi.analogRead(sensor)
# Calculate PH
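        # sensor_value is a 10-bit ADC reading (0-1023); sensor_value * adc_ref
        # / 1023 * 1000 converts it to millivolts, and 59.16 mV is the Nernst
        # slope per pH unit at 25 C, so the offset from pH 7 follows directly.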
        ph = 7 - 1000 * float(sensor_value) * adc_ref / 59.16 / 1023
print ("sensor_value =", sensor_value, " ph =", ph)
except IOError:
print ("Error")
| mit | -4,857,175,944,731,164,000 | 34.491525 | 139 | 0.746896 | false |
GeoNode/geonode | geonode/services/tests.py | 1 | 39827 | # -*- coding: utf-8 -*-
#########################################################################
#
# Copyright (C) 2016 OSGeo
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
#########################################################################
import logging
from geonode.services.enumerations import WMS, INDEXED
from django.contrib.staticfiles.testing import StaticLiveServerTestCase
from django.test import Client
from selenium import webdriver
from unittest import TestCase as StandardTestCase
from flaky import flaky
from django.urls import reverse
from django.contrib.auth import get_user_model
from django.template.defaultfilters import slugify
try:
import unittest.mock as mock
except ImportError:
import mock
from owslib.map.wms111 import ContentMetadata
from geonode.layers.models import Layer
from geonode.tests.base import GeoNodeBaseTestSupport
from geonode.services.utils import test_resource_table_status
from . import enumerations, forms
from .models import HarvestJob, Service
from .serviceprocessors import (
base,
handler,
wms,
arcgis)
from .serviceprocessors.arcgis import MapLayer
from .serviceprocessors.wms import WebMapService
from arcrest import MapService as ArcMapService
from owslib.wms import WebMapService as OwsWebMapService
from collections import namedtuple
logger = logging.getLogger(__name__)
class ModuleFunctionsTestCase(StandardTestCase):
@mock.patch("geonode.services.serviceprocessors.base.catalog",
autospec=True)
@mock.patch("geonode.services.serviceprocessors.base.settings",
autospec=True)
def test_get_cascading_workspace_returns_existing(self, mock_settings,
mock_catalog):
mock_settings.OGC_SERVER = {
"default": {
"LOCATION": "nowhere/",
"USER": "nouser",
"PASSWORD": "nopass",
}
}
mock_settings.CASCADE_WORKSPACE = "something"
phony_workspace = "fake"
cat = mock_catalog
cat.get_workspace.return_value = phony_workspace
result = base.get_geoserver_cascading_workspace(
create=False)
self.assertEqual(result, phony_workspace)
cat.get_workspace.assert_called_with(mock_settings.CASCADE_WORKSPACE)
@mock.patch("geonode.services.serviceprocessors.base.catalog",
autospec=True)
@mock.patch("geonode.services.serviceprocessors.base.settings",
autospec=True)
def test_get_cascading_workspace_creates_new_workspace(self, mock_settings,
mock_catalog):
mock_settings.OGC_SERVER = {
"default": {
"LOCATION": "nowhere/",
"USER": "nouser",
"PASSWORD": "nopass",
}
}
mock_settings.CASCADE_WORKSPACE = "something"
phony_workspace = "fake"
cat = mock_catalog
cat.get_workspace.return_value = None
cat.create_workspace.return_value = phony_workspace
result = base.get_geoserver_cascading_workspace(
create=True)
self.assertEqual(result, phony_workspace)
cat.get_workspace.assert_called_with(mock_settings.CASCADE_WORKSPACE)
cat.create_workspace.assert_called_with(
mock_settings.CASCADE_WORKSPACE,
f"http://www.geonode.org/{mock_settings.CASCADE_WORKSPACE}"
)
@mock.patch("geonode.services.serviceprocessors.handler.WmsServiceHandler",
autospec=True)
def test_get_service_handler_wms(self, mock_wms_handler):
phony_url = "http://fake"
handler.get_service_handler(phony_url, service_type=enumerations.WMS)
mock_wms_handler.assert_called_with(phony_url)
@mock.patch("arcrest.MapService",
autospec=True)
def test_get_service_handler_arcgis(self, mock_map_service):
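        # The two dicts below mimic the metadata an ArcGIS REST MapServer
        # returns (raw service contents and the parsed JSON structure); they
        # are attached to a mocked arcrest.MapService so the handler can be
        # built without any network access.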
mock_arcgis_service_contents = {
"currentVersion": 10.51,
"serviceDescription": "Droits petroliers et gaziers / Oil and Gas Rights",
"mapName": "Droits_petroliers_et_gaziers_Oil_and_Gas_Rights",
"description": "",
"copyrightText": "",
"supportsDynamicLayers": False,
"layers": [
{
"id": 0,
"name": "Droits pétroliers et gaziers / Oil and Gas Rights",
"parentLayerId": -1,
"defaultVisibility": True,
"subLayerIds": None,
"minScale": 0,
"maxScale": 0
}
],
"tables": [],
"spatialReference": {
"wkid": 4140,
"latestWkid": 4617
},
"singleFusedMapCache": False,
"initialExtent": {
"xmin": -144.97375000000002,
"ymin": 58.90551066699999,
"xmax": -57.55125000000002,
"ymax": 91.84630866699999,
"spatialReference": {
"wkid": 4140,
"latestWkid": 4617
}
},
"fullExtent": {
"xmin": -144.97375,
"ymin": 34.637024667000006,
"xmax": -57.55125,
"ymax": 91.84630866699999,
"spatialReference": {
"wkid": 4140,
"latestWkid": 4617
}
},
"minScale": 0,
"maxScale": 0,
"units": "esriDecimalDegrees",
"supportedImageFormatTypes": "PNG32,PNG24,PNG,JPG,DIB,TIFF,EMF,PS,PDF,GIF,SVG,SVGZ,BMP",
"documentInfo": {
"Title": "Droits petroliers et gaziers / Oil and Gas Rights",
"Author": "",
"Comments": "Droits petroliers et gaziers / Oil and Gas Rights",
"Subject": "Droits petroliers et gaziers / Oil and Gas Rights",
"Category": "",
"AntialiasingMode": "None",
"TextAntialiasingMode": "Force",
"Keywords": "Droits petroliers et gaziers,Oil and Gas Rights"
},
"capabilities": "Map,Query,Data",
"supportedQueryFormats": "JSON, AMF, geoJSON",
"exportTilesAllowed": False,
"supportsDatumTransformation": True,
"maxRecordCount": 1000,
"maxImageHeight": 2048,
"maxImageWidth": 2048,
"supportedExtensions": "FeatureServer, KmlServer, WFSServer, WMSServer"
}
mock_arcgis_service_json_struct = {
"supportsDynamicLayers": False,
"initialExtent": {
"xmin": -144.97375000000002,
"ymin": 58.90551066699999,
"ymax": 91.84630866699999,
"xmax": -57.55125000000002,
"spatialReference": {
"wkid": 4140,
"latestWkid": 4617
}
},
"documentInfo": {
"Category": "",
"Author": "",
"TextAntialiasingMode": "Force",
"Title": "Droits petroliers et gaziers / Oil and Gas Rights",
"Comments": "Droits petroliers et gaziers / Oil and Gas Rights",
"AntialiasingMode": "None",
"Keywords": "Droits petroliers et gaziers,Oil and Gas Rights",
"Subject": "Droits petroliers et gaziers / Oil and Gas Rights"
},
"spatialReference": {
"wkid": 4140,
"latestWkid": 4617
},
"description": "",
"layers": [
{
"name": "Droits pétroliers et gaziers / Oil and Gas Rights",
"maxScale": 0,
"defaultVisibility": True,
"parentLayerId": -1,
"id": 0,
"minScale": 0,
"subLayerIds": None
}
],
"tables": [],
"supportedImageFormatTypes": "PNG32,PNG24,PNG,JPG,DIB,TIFF,EMF,PS,PDF,GIF,SVG,SVGZ,BMP",
"capabilities": "Map,Query,Data",
"mapName": "Droits_petroliers_et_gaziers_Oil_and_Gas_Rights",
"currentVersion": 10.51,
"units": "esriDecimalDegrees",
"supportedQueryFormats": "JSON, AMF, geoJSON",
"maxRecordCount": 1000,
"exportTilesAllowed": False,
"maxImageHeight": 2048,
"supportedExtensions": "FeatureServer, KmlServer, WFSServer, WMSServer",
"fullExtent": {
"xmin": -144.97375,
"ymin": 34.637024667000006,
"ymax": 91.84630866699999,
"xmax": -57.55125,
"spatialReference": {
"wkid": 4140,
"latestWkid": 4617
}
},
"singleFusedMapCache": False,
"supportsDatumTransformation": True,
"maxImageWidth": 2048,
"maxScale": 0,
"copyrightText": "",
"minScale": 0,
"serviceDescription": "Droits petroliers et gaziers / Oil and Gas Rights"
}
phony_url = "http://fake"
mock_parsed_arcgis = mock.MagicMock(ArcMapService).return_value
(url, mock_parsed_arcgis) = mock.MagicMock(ArcMapService,
return_value=(phony_url,
mock_parsed_arcgis)).return_value
mock_parsed_arcgis.url = phony_url
mock_parsed_arcgis._contents = mock_arcgis_service_contents
mock_parsed_arcgis._json_struct = mock_arcgis_service_json_struct
mock_map_service.return_value = (phony_url, mock_parsed_arcgis)
handler = arcgis.ArcImageServiceHandler(phony_url)
self.assertEqual(handler.url, phony_url)
LayerESRIExtent = namedtuple('LayerESRIExtent', 'spatialReference xmin ymin ymax xmax')
LayerESRIExtentSpatialReference = namedtuple('LayerESRIExtentSpatialReference', 'wkid latestWkid')
layer_meta = MapLayer(
id=0,
title='Droits pétroliers et gaziers / Oil and Gas Rights',
abstract='Droits pétroliers et gaziers / Oil and Gas Rights',
type='Feature Layer',
geometryType='esriGeometryPolygon',
copyrightText='',
extent=LayerESRIExtent(
LayerESRIExtentSpatialReference(4140, 4617),
-144.97375,
34.637024667000006,
91.84630866699999,
-57.55125),
fields=[
{
'alias': 'OBJECTID',
'domain': None,
'type': 'esriFieldTypeOID',
'name': 'OBJECTID'
},
{
'alias': 'Numéro du titre / Title Number',
'length': 16,
'type': 'esriFieldTypeString',
'name': 'LICENCE_NUMBER',
'domain': None
},
{
'alias': 'Superficie actuelle (ha) / Current Area (ha)',
'domain': None,
'type': 'esriFieldTypeDouble',
'name': 'CURRENT_AREA_HA'
},
{
'alias': 'Code du type de permis / Licence Type Code',
'length': 5,
'type': 'esriFieldTypeString',
'name': 'AGRMT_TYPE',
'domain': None
},
{
'alias': 'Datum',
'length': 8,
'type': 'esriFieldTypeString',
'name': 'DATUM',
'domain': None
},
{
'alias': 'Région (anglais) / Region (English)',
'length': 64,
'type': 'esriFieldTypeString',
'name': 'REGION_E',
'domain': None
},
{
'alias': 'Région (français) / Region (French)',
'length': 64,
'type': 'esriFieldTypeString',
'name': 'REGION_F',
'domain': None
},
{
'alias': 'Représentant / Representative',
'length': 50,
'type': 'esriFieldTypeString',
'name': 'COMPANY_NAME',
'domain': None
},
{
'alias': "Date d'entrée en vigueur / Effective Date",
'length': 8,
'type': 'esriFieldTypeDate',
'name': 'LICENCE_ISSUE_DATE',
'domain': None
},
{
'alias': "Date d'échéance / Expiry Date",
'length': 8,
'type': 'esriFieldTypeDate',
'name': 'LICENCE_EXPIRY_DATE',
'domain': None
},
{
'alias': "Type d'accord (anglais) / Agreement Type (English)",
'length': 50,
'type': 'esriFieldTypeString',
'name': 'AGRMT_TYPE_E',
'domain': None
},
{
'alias': "Type d'accord (français) / Agreement Type (French)",
'length': 50,
'type': 'esriFieldTypeString',
'name': 'AGRMT_TYPE_F',
'domain': None
},
{
'alias': 'Shape',
'domain': None,
'type': 'esriFieldTypeGeometry',
'name': 'SHAPE'
}
],
minScale=0,
maxScale=0
)
resource_fields = handler._get_indexed_layer_fields(layer_meta)
self.assertEqual(resource_fields['alternate'], f'{slugify(phony_url)}:{layer_meta.id}')
@mock.patch("arcrest.MapService",
autospec=True)
def test_get_arcgis_alternative_structure(self, mock_map_service):
LayerESRIExtent = namedtuple('LayerESRIExtent', 'spatialReference xmin ymin ymax xmax')
LayerESRIExtentSpatialReference = namedtuple('LayerESRIExtentSpatialReference', 'wkid latestWkid')
mock_arcgis_service_contents = {
'copyrightText': '',
'description': '',
'documentInfo': {
'Author': 'Administrator',
'Category': '',
'Comments': '',
'Keywords': '',
'Subject': '',
'Title': 'basemap_ortofoto_AGEA2011'
},
'fullExtent': {
'xmax': 579764.2319999984,
'xmin': 386130.6820000001,
'ymax': 4608909.064,
'ymin': 4418016.7140000025
},
'initialExtent': {
'xmax': 605420.5635976626,
'xmin': 349091.7176066373,
'ymax': 4608197.140968505,
'ymin': 4418728.637031497
},
'layers': [
{
'copyrightText': '',
'definitionExpression': '',
'description': '',
'displayField': '',
'extent': LayerESRIExtent(
LayerESRIExtentSpatialReference(None, None),
570962.7069999985,
4600232.139,
394932.207,
4426693.639000002),
'fields': [],
'geometryType': '',
'id': 1,
'maxScale': 0.0,
'minScale': 0.0,
'name': 'Regione_Campania.ecw',
'title': 'Regione_Campania.ecw',
'parentLayer': {
'id': -1,
'name': '-1'
},
'subLayers': [],
'type': 'Raster Layer'
}
],
'mapName': 'Layers',
'serviceDescription': '',
'singleFusedMapCache': True,
'spatialReference': None,
'tileInfo': {
'cols': 512,
'compressionQuality': 0,
'dpi': 96,
'format': 'PNG8',
'lods': [
{'level': 0, 'resolution': 185.20870375074085, 'scale': 700000.0},
{'level': 1, 'resolution': 66.1459656252646, 'scale': 250000.0},
{'level': 2, 'resolution': 26.458386250105836, 'scale': 100000.0},
{'level': 3, 'resolution': 19.843789687579378, 'scale': 75000.0},
{'level': 4, 'resolution': 13.229193125052918, 'scale': 50000.0},
{'level': 5, 'resolution': 6.614596562526459, 'scale': 25000.0},
{'level': 6, 'resolution': 2.6458386250105836, 'scale': 10000.0},
{'level': 7, 'resolution': 1.3229193125052918, 'scale': 5000.0},
{'level': 8, 'resolution': 0.5291677250021167, 'scale': 2000.0}
],
'origin': {
'x': 289313.907000001,
'y': 4704355.239
},
'rows': 512,
'spatialReference': None
},
'units': 'esriMeters'
}
phony_url = "http://sit.cittametropolitana.na.it/arcgis/rest/services/basemap_ortofoto_AGEA2011/MapServer"
mock_parsed_arcgis = mock.MagicMock(ArcMapService).return_value
(url, mock_parsed_arcgis) = mock.MagicMock(ArcMapService,
return_value=(phony_url,
mock_parsed_arcgis)).return_value
mock_parsed_arcgis.url = phony_url
mock_parsed_arcgis.layers = mock_arcgis_service_contents['layers']
mock_parsed_arcgis._contents = mock_arcgis_service_contents
mock_parsed_arcgis._json_struct = mock_arcgis_service_contents
mock_map_service.return_value = (phony_url, mock_parsed_arcgis)
handler = arcgis.ArcImageServiceHandler(phony_url)
self.assertEqual(handler.url, phony_url)
layer_meta = handler._layer_meta(mock_parsed_arcgis.layers[0])
self.assertIsNotNone(layer_meta)
self.assertEqual(layer_meta.id, 1)
resource_fields = handler._get_indexed_layer_fields(layer_meta)
self.assertEqual(resource_fields['alternate'], f'{slugify(phony_url)}:{layer_meta.id}')
test_user, created = get_user_model().objects.get_or_create(username="serviceowner")
if created:
test_user.set_password("somepassword")
test_user.save()
result = handler.create_geonode_service(test_user)
try:
geonode_service, created = Service.objects.get_or_create(
base_url=result.base_url,
owner=test_user)
Layer.objects.filter(remote_service=geonode_service).delete()
HarvestJob.objects.filter(service=geonode_service).delete()
handler._harvest_resource(layer_meta, geonode_service)
geonode_layer = Layer.objects.filter(remote_service=geonode_service).get()
self.assertIsNotNone(geonode_layer)
self.assertNotEqual(geonode_layer.srid, "EPSG:4326")
harvest_job, created = HarvestJob.objects.get_or_create(
service=geonode_service,
resource_id=geonode_layer.alternate
)
self.assertIsNotNone(harvest_job)
Layer.objects.filter(remote_service=geonode_service).delete()
self.assertEqual(HarvestJob.objects.filter(service=geonode_service,
resource_id=geonode_layer.alternate).count(), 0)
except Service.DoesNotExist as e:
# In the case the Service URL becomes inaccessible for some reason
logger.error(e)
class WmsServiceHandlerTestCase(GeoNodeBaseTestSupport):
def setUp(self):
super(WmsServiceHandlerTestCase, self).setUp()
self.phony_url = ("http://a-really-long-and-fake-name-here-so-that-"
"we-use-it-in-tests")
self.phony_title = "a generic title"
self.phony_version = "s.version"
self.phony_layer_name = "phony_name"
self.phony_keywords = ["first", "second"]
mock_parsed_wms = mock.MagicMock(OwsWebMapService).return_value
(url, mock_parsed_wms) = mock.MagicMock(WebMapService,
return_value=(self.phony_url,
mock_parsed_wms)).return_value
mock_parsed_wms.provider.url = self.phony_url
mock_parsed_wms.identification.abstract = None
mock_parsed_wms.identification.title = self.phony_title
mock_parsed_wms.identification.version = self.phony_version
mock_parsed_wms.identification.keywords = self.phony_keywords
mock_layer_meta = mock.MagicMock(ContentMetadata)
mock_layer_meta.name = self.phony_layer_name
mock_layer_meta.title = self.phony_layer_name
mock_layer_meta.abstract = ""
mock_layer_meta.keywords = []
mock_layer_meta.children = []
mock_layer_meta.crsOptions = ["EPSG:3857"]
mock_layer_meta.boundingBox = [-5000, -5000, 5000, 5000, "EPSG:3857"]
mock_parsed_wms.contents = {
mock_layer_meta.name: mock_layer_meta,
}
self.parsed_wms = mock_parsed_wms
self.test_user, created = get_user_model().objects.get_or_create(username="serviceowner")
if created:
self.test_user.set_password("somepassword")
self.test_user.save()
self.local_user, created = get_user_model().objects.get_or_create(username="serviceuser")
if created:
self.local_user.set_password("somepassword")
self.local_user.save()
@mock.patch("geonode.services.serviceprocessors.wms.WebMapService",
autospec=True)
def test_has_correct_url(self, mock_wms):
mock_wms.return_value = (self.phony_url, self.parsed_wms)
handler = wms.WmsServiceHandler(self.phony_url)
self.assertEqual(handler.url, self.phony_url)
@mock.patch("geonode.services.serviceprocessors.wms.WebMapService",
autospec=True)
def test_has_valid_name_when_no_title_exists(self, mock_wms):
mock_wms.return_value = (self.phony_url, self.parsed_wms)
mock_wms.return_value[1].identification.title = ""
handler = wms.WmsServiceHandler(self.phony_url)
self.assertEqual(
handler.name, slugify(self.phony_url)[:255])
@mock.patch("geonode.services.serviceprocessors.wms.WebMapService",
autospec=True)
def test_has_valid_name_when_title_exists(self, mock_wms):
mock_wms.return_value = (self.phony_url, self.parsed_wms)
handler = wms.WmsServiceHandler(self.phony_url)
self.assertNotEqual(handler.name, slugify(self.phony_title))
self.assertEqual("a-generic-title", slugify(self.phony_title))
@mock.patch("geonode.services.serviceprocessors.wms.WebMapService",
autospec=True)
def test_has_correct_service_type(self, mock_wms):
mock_wms.return_value = (self.phony_url, self.parsed_wms)
handler = wms.WmsServiceHandler(self.phony_url)
self.assertEqual(handler.service_type, enumerations.WMS)
@mock.patch("geonode.services.serviceprocessors.wms.WebMapService",
autospec=True)
@mock.patch("geonode.services.serviceprocessors.wms.settings",
autospec=True)
def test_detects_indexed_service(self, mock_settings, mock_wms):
mock_settings.DEFAULT_MAP_CRS = "EPSG:3857"
mock_wms.return_value = (self.phony_url, self.parsed_wms)
handler = wms.WmsServiceHandler(self.phony_url)
self.assertEqual(handler.indexing_method, enumerations.INDEXED)
@mock.patch("geonode.services.serviceprocessors.wms.WebMapService",
autospec=True)
@mock.patch("geonode.services.serviceprocessors.wms.settings",
autospec=True)
def test_detects_cascaded_service(self, mock_settings, mock_wms):
mock_settings.DEFAULT_MAP_CRS = "EPSG:3857"
mock_layer_meta = mock.MagicMock(ContentMetadata)
mock_layer_meta.name = "phony_name"
mock_layer_meta.children = []
mock_layer_meta.crsOptions = ["epsg:4326"]
self.parsed_wms.contents = {
mock_layer_meta.name: mock_layer_meta,
}
mock_wms.return_value = (self.phony_url, self.parsed_wms)
handler = wms.WmsServiceHandler(self.phony_url)
self.assertEqual(handler.indexing_method, enumerations.CASCADED)
@mock.patch("geonode.services.serviceprocessors.wms.WebMapService",
autospec=True)
def test_create_geonode_service(self, mock_wms):
mock_wms.return_value = (self.phony_url, self.parsed_wms)
handler = wms.WmsServiceHandler(self.phony_url)
result = handler.create_geonode_service(self.test_user)
self.assertEqual(result.base_url, self.phony_url)
self.assertEqual(result.type, handler.service_type)
self.assertEqual(result.method, handler.indexing_method)
self.assertEqual(result.owner, self.test_user)
self.assertEqual(result.version, self.phony_version)
self.assertEqual(result.name, handler.name)
self.assertEqual(result.title, self.phony_title)
        # metadata_only is set to True
self.assertTrue(result.metadata_only)
@mock.patch("geonode.services.serviceprocessors.wms.WebMapService",
autospec=True)
def test_get_keywords(self, mock_wms):
mock_wms.return_value = (self.phony_url, self.parsed_wms)
handler = wms.WmsServiceHandler(self.phony_url)
result = handler.get_keywords()
self.assertEqual(result, self.phony_keywords)
@mock.patch("geonode.services.serviceprocessors.wms.WebMapService",
autospec=True)
def test_get_resource(self, mock_wms):
mock_wms.return_value = (self.phony_url, self.parsed_wms)
handler = wms.WmsServiceHandler(self.phony_url)
result = handler.get_resource(self.phony_layer_name)
self.assertEqual(result.name, self.phony_layer_name)
@mock.patch("geonode.services.serviceprocessors.wms.WebMapService",
autospec=True)
def test_get_resources(self, mock_wms):
mock_wms.return_value = (self.phony_url, self.parsed_wms)
handler = wms.WmsServiceHandler(self.phony_url)
result = list(handler.get_resources())
self.assertEqual(result[0].name, self.phony_layer_name)
test_user, created = get_user_model().objects.get_or_create(username="serviceowner")
if created:
test_user.set_password("somepassword")
test_user.save()
result = handler.create_geonode_service(test_user)
try:
geonode_service, created = Service.objects.get_or_create(
base_url=result.base_url,
owner=test_user)
Layer.objects.filter(remote_service=geonode_service).delete()
HarvestJob.objects.filter(service=geonode_service).delete()
result = list(handler.get_resources())
layer_meta = handler.get_resource(result[0].name)
resource_fields = handler._get_indexed_layer_fields(layer_meta)
keywords = resource_fields.pop("keywords")
resource_fields["keywords"] = keywords
resource_fields["is_approved"] = True
resource_fields["is_published"] = True
geonode_layer = handler._create_layer(geonode_service, **resource_fields)
self.assertIsNotNone(geonode_layer)
self.assertNotEqual(geonode_layer.srid, "EPSG:4326")
harvest_job, created = HarvestJob.objects.get_or_create(
service=geonode_service,
resource_id=geonode_layer.alternate
)
self.assertIsNotNone(harvest_job)
Layer.objects.filter(remote_service=geonode_service).delete()
self.assertEqual(HarvestJob.objects.filter(service=geonode_service,
resource_id=geonode_layer.alternate).count(), 0)
legend_url = handler._create_layer_legend_link(geonode_layer)
self.assertTrue('sld_version=1.1.0' in str(legend_url))
except Service.DoesNotExist as e:
# In the case the Service URL becomes inaccessible for some reason
logger.error(e)
@mock.patch("geonode.services.serviceprocessors.wms.WebMapService",
autospec=True)
@mock.patch("geonode.services.serviceprocessors.wms.settings",
autospec=True)
def test_offers_geonode_projection(self, mock_settings, mock_wms):
mock_settings.DEFAULT_MAP_CRS = "EPSG:3857"
mock_wms.return_value = (self.phony_url, self.parsed_wms)
handler = wms.WmsServiceHandler(self.phony_url)
result = handler._offers_geonode_projection()
self.assertTrue(result)
@mock.patch("geonode.services.serviceprocessors.wms.WebMapService",
autospec=True)
@mock.patch("geonode.services.serviceprocessors.wms.settings",
autospec=True)
def test_does_not_offer_geonode_projection(self, mock_settings, mock_wms):
mock_settings.DEFAULT_MAP_CRS = "EPSG:3857"
mock_wms.return_value = (self.phony_url, self.parsed_wms)
self.parsed_wms.contents[self.phony_layer_name].crsOptions = [
"EPSG:4326"]
handler = wms.WmsServiceHandler(self.phony_url)
result = handler._offers_geonode_projection()
self.assertFalse(result)
@mock.patch("geonode.services.serviceprocessors.wms.WebMapService",
autospec=True)
@mock.patch("geonode.services.serviceprocessors.base.get_geoserver_"
"cascading_workspace", autospec=True)
def test_get_store(self, mock_get_gs_cascading_store, mock_wms):
mock_workspace = mock_get_gs_cascading_store.return_value
mock_catalog = mock_workspace.catalog
mock_catalog.get_store.return_value = None
mock_wms.return_value = (self.phony_url, self.parsed_wms)
handler = wms.WmsServiceHandler(self.phony_url)
handler._get_store(create=True)
mock_catalog.create_wmsstore.assert_called_with(
name=handler.name,
workspace=mock_workspace,
user=mock_catalog.username,
password=mock_catalog.password
)
@flaky(max_runs=3)
def test_local_user_cant_delete_service(self):
self.client.logout()
response = self.client.get(reverse('register_service'))
self.assertEqual(response.status_code, 302)
url = 'https://demo.geo-solutions.it/geoserver/ows?service=wms&version=1.3.0&request=GetCapabilities'
# url = "http://fake"
service_type = enumerations.WMS
form_data = {
'url': url,
'type': service_type
}
form = forms.CreateServiceForm(form_data)
self.assertTrue(form.is_valid())
self.client.login(username='serviceowner', password='somepassword')
response = self.client.post(reverse('register_service'), data=form_data)
s = Service.objects.all().first()
self.assertEqual(len(Service.objects.all()), 1)
self.assertEqual(s.owner, self.test_user)
self.client.login(username='serviceuser', password='somepassword')
response = self.client.post(reverse('edit_service', args=(s.id,)))
self.assertEqual(response.status_code, 401)
response = self.client.post(reverse('remove_service', args=(s.id,)))
self.assertEqual(response.status_code, 401)
self.assertEqual(len(Service.objects.all()), 1)
self.client.login(username='serviceowner', password='somepassword')
form_data = {
'service-title': 'Foo Title',
'service-description': 'Foo Description',
'service-abstract': 'Foo Abstract',
'service-keywords': 'Foo, Service, OWS'
}
form = forms.ServiceForm(form_data, instance=s, prefix="service")
self.assertTrue(form.is_valid())
response = self.client.post(reverse('edit_service', args=(s.id,)), data=form_data)
self.assertEqual(s.title, 'Foo Title')
self.assertEqual(s.description, 'Foo Description')
self.assertEqual(s.abstract, 'Foo Abstract')
self.assertEqual(['Foo', 'OWS', 'Service'],
list(s.keywords.all().values_list('name', flat=True)))
response = self.client.post(reverse('remove_service', args=(s.id,)))
self.assertEqual(len(Service.objects.all()), 0)
@flaky(max_runs=3)
def test_add_duplicate_remote_service_url(self):
form_data = {
'url': 'https://demo.geo-solutions.it/geoserver/ows?service=wms&version=1.3.0&request=GetCapabilities',
'type': enumerations.WMS
}
self.client.login(username='serviceowner', password='somepassword')
# Add the first resource
url = 'https://demo.geo-solutions.it/geoserver/ows?service=wms&version=1.3.0&request=GetCapabilities'
# url = "http://fake"
service_type = enumerations.WMS
form_data = {
'url': url,
'type': service_type
}
form = forms.CreateServiceForm(form_data)
self.assertTrue(form.is_valid())
self.assertEqual(Service.objects.count(), 0)
self.client.post(reverse('register_service'), data=form_data)
self.assertEqual(Service.objects.count(), 1)
# Try adding the same URL again
form = forms.CreateServiceForm(form_data)
self.assertFalse(form.is_valid())
self.assertEqual(Service.objects.count(), 1)
self.client.post(reverse('register_service'), data=form_data)
self.assertEqual(Service.objects.count(), 1)
class WmsServiceHarvestingTestCase(StaticLiveServerTestCase):
selenium = None
@classmethod
def setUpClass(cls):
super(WmsServiceHarvestingTestCase, cls).setUpClass()
try:
cls.client = Client()
UserModel = get_user_model()
cls.user = UserModel.objects.create_user(username='test', password='test@123', first_name='ather',
last_name='ashraf', is_staff=True,
is_active=True, is_superuser=False)
cls.user.save()
cls.client.login(username='test', password='test@123')
cls.cookie = cls.client.cookies['sessionid']
cls.selenium = webdriver.Firefox()
cls.selenium.implicitly_wait(10)
cls.selenium.get(f"{cls.live_server_url}/")
cls.selenium.add_cookie({'name': 'sessionid', 'value': cls.cookie.value, 'secure': False, 'path': '/'})
cls.selenium.refresh()
reg_url = reverse('register_service')
cls.client.get(reg_url)
url = 'https://demo.geo-solutions.it/geoserver/ows?service=wms&version=1.3.0&request=GetCapabilities'
service_type = enumerations.WMS
form_data = {
'url': url,
'type': service_type
}
forms.CreateServiceForm(form_data)
response = cls.client.post(reverse('register_service'), data=form_data)
cls.selenium.get(cls.live_server_url + response.url)
cls.selenium.refresh()
except Exception as e:
msg = str(e)
print(msg)
@classmethod
def tearDownClass(cls):
if cls.selenium:
cls.selenium.quit()
super(WmsServiceHarvestingTestCase, cls).tearDownClass()
def test_harvest_resources(self):
if self.selenium:
table = self.selenium.find_element_by_id('resource_table')
if table:
test_resource_table_status(self, table, False)
self.selenium.find_element_by_id('id-filter').send_keys('atlantis:roads')
self.selenium.find_element_by_id('btn-id-filter').click()
test_resource_table_status(self, table, True)
self.selenium.find_element_by_id('name-filter').send_keys('landmarks')
self.selenium.find_element_by_id('btn-name-filter').click()
test_resource_table_status(self, table, True)
self.selenium.find_element_by_id('desc-filter').send_keys('None')
self.selenium.find_element_by_id('btn-desc-filter').click()
test_resource_table_status(self, table, True)
self.selenium.find_element_by_id('desc-filter').send_keys('')
self.selenium.find_element_by_id('btn-desc-filter').click()
test_resource_table_status(self, table, True)
self.selenium.find_element_by_id('btnClearFilter').click()
test_resource_table_status(self, table, False)
self.selenium.find_element_by_id('id-filter').send_keys('atlantis:tiger_roads_tiger_roads')
self.selenium.find_element_by_id('btn-id-filter').click()
# self.selenium.find_element_by_id('option_atlantis:tiger_roads_tiger_roads').click()
# self.selenium.find_element_by_tag_name('form').submit()
class TestServiceViews(GeoNodeBaseTestSupport):
def setUp(self):
self.user = 'admin'
self.passwd = 'admin'
self.admin = get_user_model().objects.get(username='admin')
self.sut, _ = Service.objects.get_or_create(
type=WMS,
name='Bogus',
title='Pocus',
owner=self.admin,
method=INDEXED,
metadata_only=True,
base_url='http://bogus.pocus.com/ows')
self.sut.clear_dirty_state()
def test_user_admin_can_access_to_page(self):
self.client.login(username='admin', password='admin')
response = self.client.get(reverse('services'))
self.assertEqual(response.status_code, 200)
def test_invalid_user_cannot_access_to_page(self):
self.client.login(username='bobby', password='bobby')
response = self.client.get(reverse('services'))
self.assertEqual(response.status_code, 302)
| gpl-3.0 | 4,394,017,487,905,976,300 | 42.703622 | 115 | 0.559502 | false |
DaniilLeksin/theblog | posts/views.py | 1 | 1155 | from django.shortcuts import render
from rest_framework import permissions, viewsets
from rest_framework.response import Response
from posts.models import Post
from posts.permissions import IsAuthorOfPost
from posts.serializers import PostSerializer
# Create your views here.
class PostViewSet(viewsets.ModelViewSet):
queryset = Post.objects.order_by('-created_at')
serializer_class = PostSerializer
def get_permissions(self):
if self.request.method in permissions.SAFE_METHODS:
return (permissions.AllowAny(),)
return (permissions.IsAuthenticated(), IsAuthorOfPost(),)
    def perform_create(self, serializer):
        # Save the new post once, attaching the requesting user as the author.
        # Calling the parent implementation afterwards would run
        # serializer.save() a second time, so it is intentionally omitted.
        serializer.save(author=self.request.user)
class AccountPostsViewSet(viewsets.ViewSet):
queryset = Post.objects.select_related('author').all()
serializer_class = PostSerializer
def list(self, request, account_username=None):
queryset = self.queryset.filter(author__username=account_username)
serializer = self.serializer_class(queryset, many=True)
return Response(serializer.data)
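# Usage sketch (not part of the original app; route prefixes are assumptions):
# these viewsets are normally wired up through a DRF router in urls.py, e.g.
#
#   from rest_framework import routers
#   from posts.views import PostViewSet
#
#   router = routers.SimpleRouter()
#   router.register(r'posts', PostViewSet)
#   urlpatterns = router.urls
#
# AccountPostsViewSet only implements list(), so it would be mapped manually
# to a URL pattern that captures account_username.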
| gpl-2.0 | 8,992,568,961,566,526,000 | 30.216216 | 74 | 0.750649 | false |
matthiascy/panda3d | direct/src/p3d/JavaScript.py | 8 | 12034 | """ This module defines some simple classes and instances which are
useful when writing code that integrates with JavaScript, especially
code that runs in a browser via the web plugin. """
__all__ = ["UndefinedObject", "Undefined", "ConcreteStruct", "BrowserObject", "MethodWrapper"]
import types
class UndefinedObject:
""" This is a special object that is returned by the browser to
represent an "undefined" or "void" value, typically the value for
an uninitialized variable or undefined property. It has no
attributes, similar to None, but it is a slightly different
concept in JavaScript. """
def __nonzero__(self):
return False
def __str__(self):
return "Undefined"
# In fact, we normally always return this precise instance of the
# UndefinedObject.
Undefined = UndefinedObject()
class ConcreteStruct:
""" Python objects that inherit from this class are passed to
JavaScript as a concrete struct: a mapping from string -> value,
with no methods, passed by value. This can be more optimal than
traditional Python objects which are passed by reference,
especially for small objects which might be repeatedly referenced
on the JavaScript side. """
def __init__(self):
pass
def getConcreteProperties(self):
""" Returns a list of 2-tuples of the (key, value) pairs that
are to be passed to the concrete instance. By default, this
returns all properties of the object. You can override this
to restrict the set of properties that are uploaded. """
return self.__dict__.items()
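# Minimal usage sketch for ConcreteStruct (Point is a hypothetical example
# type, not part of this module): instances are passed to JavaScript by value
# as a plain key/value mapping built from their attributes.
#
#   class Point(ConcreteStruct):
#       def __init__(self, x, y):
#           ConcreteStruct.__init__(self)
#           self.x = x
#           self.y = y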
class BrowserObject:
""" This class provides the Python wrapper around some object that
actually exists in the plugin host's namespace, e.g. a JavaScript
or DOM object. """
def __init__(self, runner, objectId):
self.__dict__['_BrowserObject__runner'] = runner
self.__dict__['_BrowserObject__objectId'] = objectId
# This element is filled in by __getattr__; it connects
# the object to its parent.
self.__dict__['_BrowserObject__childObject'] = (None, None)
# This is a cache of method names to MethodWrapper objects in
# the parent object.
self.__dict__['_BrowserObject__methods'] = {}
def __del__(self):
# When the BrowserObject destructs, tell the parent process it
# doesn't need to keep around its corresponding P3D_object any
# more.
self.__runner.dropObject(self.__objectId)
def __cacheMethod(self, methodName):
""" Stores a pointer to the named method on this object, so
that the next time __getattr__ is called, it can retrieve the
method wrapper without having to query the browser. This
cache assumes that callable methods don't generally come and
go on and object.
The return value is the MethodWrapper object. """
method = self.__methods.get(methodName, None)
if method is None:
method = MethodWrapper(self.__runner, self, methodName)
self.__methods[methodName] = method
return method
def __str__(self):
return self.toString()
def __nonzero__(self):
return True
def __call__(self, *args, **kw):
needsResponse = True
if 'needsResponse' in kw:
needsResponse = kw['needsResponse']
del kw['needsResponse']
if kw:
            raise TypeError('Keyword arguments not supported')
try:
parentObj, attribName = self.__childObject
if parentObj:
# Call it as a method.
if parentObj is self.__runner.dom and attribName == 'alert':
# As a special hack, we don't wait for the return
# value from the alert() call, since this is a
# blocking call, and waiting for this could cause
# problems.
needsResponse = False
if parentObj is self.__runner.dom and attribName == 'eval' and len(args) == 1 and isinstance(args[0], types.StringTypes):
# As another special hack, we make dom.eval() a
# special case, and map it directly into an eval()
# call. If the string begins with 'void ', we further
# assume we're not waiting for a response.
if args[0].startswith('void '):
needsResponse = False
result = self.__runner.scriptRequest('eval', parentObj, value = args[0], needsResponse = needsResponse)
else:
# This is a normal method call.
try:
result = self.__runner.scriptRequest('call', parentObj, propertyName = attribName, value = args, needsResponse = needsResponse)
except EnvironmentError:
# Problem on the call. Maybe no such method?
raise AttributeError
# Hey, the method call appears to have succeeded.
# Cache the method object on the parent so we won't
# have to look up the method wrapper again next time.
parentObj.__cacheMethod(attribName)
else:
# Call it as a plain function.
result = self.__runner.scriptRequest('call', self, value = args, needsResponse = needsResponse)
except EnvironmentError:
# Some odd problem on the call.
raise TypeError
return result
def __getattr__(self, name):
""" Remaps attempts to query an attribute, as in obj.attr,
into the appropriate calls to query the actual browser object
under the hood. """
# First check to see if there's a cached method wrapper from a
# previous call.
method = self.__methods.get(name, None)
if method:
return method
# No cache. Go query the browser for the desired value.
try:
value = self.__runner.scriptRequest('get_property', self,
propertyName = name)
except EnvironmentError:
# Failed to retrieve the attribute. But maybe there's a
# method instead?
if self.__runner.scriptRequest('has_method', self, propertyName = name):
# Yes, so create a method wrapper for it.
return self.__cacheMethod(name)
raise AttributeError(name)
if isinstance(value, BrowserObject):
# Fill in the parent object association, so __call__ can
# properly call a method. (Javascript needs to know the
# method container at the time of the call, and doesn't
# store it on the function object.)
value.__dict__['_BrowserObject__childObject'] = (self, name)
return value
def __setattr__(self, name, value):
if name in self.__dict__:
self.__dict__[name] = value
return
result = self.__runner.scriptRequest('set_property', self,
propertyName = name,
value = value)
if not result:
raise AttributeError(name)
def __delattr__(self, name):
if name in self.__dict__:
del self.__dict__[name]
return
result = self.__runner.scriptRequest('del_property', self,
propertyName = name)
if not result:
raise AttributeError(name)
def __getitem__(self, key):
""" Remaps attempts to query an attribute, as in obj['attr'],
into the appropriate calls to query the actual browser object
under the hood. Following the JavaScript convention, we treat
obj['attr'] almost the same as obj.attr. """
try:
value = self.__runner.scriptRequest('get_property', self,
propertyName = str(key))
except EnvironmentError:
# Failed to retrieve the property. We return IndexError
# for numeric keys so we can properly support Python's
# iterators, but we return KeyError for string keys to
# emulate mapping objects.
if isinstance(key, types.StringTypes):
raise KeyError(key)
else:
raise IndexError(key)
return value
def __setitem__(self, key, value):
result = self.__runner.scriptRequest('set_property', self,
propertyName = str(key),
value = value)
if not result:
if isinstance(key, types.StringTypes):
raise KeyError(key)
else:
raise IndexError(key)
def __delitem__(self, key):
result = self.__runner.scriptRequest('del_property', self,
propertyName = str(key))
if not result:
if isinstance(key, types.StringTypes):
raise KeyError(key)
else:
raise IndexError(key)
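# Usage sketch (assumes a plugin runner that exposes the browser DOM as
# ``runner.dom``, as referenced in __call__ above; names are illustrative):
#
#   dom = runner.dom
#   dom.alert('hello')                  # special-cased: no response awaited
#   title = dom.document.title          # property access queries the browser
#   dom.eval('void console.log("hi")')  # mapped to eval, no response awaited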
class MethodWrapper:
""" This is a Python wrapper around a property of a BrowserObject
that doesn't appear to be a first-class object in the Python
sense, but is nonetheless a callable method. """
def __init__(self, runner, parentObj, objectId):
self.__dict__['_MethodWrapper__runner'] = runner
self.__dict__['_MethodWrapper__childObject'] = (parentObj, objectId)
def __str__(self):
parentObj, attribName = self.__childObject
return "%s.%s" % (parentObj, attribName)
def __nonzero__(self):
return True
def __call__(self, *args, **kw):
needsResponse = True
if 'needsResponse' in kw:
needsResponse = kw['needsResponse']
del kw['needsResponse']
if kw:
            raise TypeError('Keyword arguments not supported')
try:
parentObj, attribName = self.__childObject
# Call it as a method.
if parentObj is self.__runner.dom and attribName == 'alert':
# As a special hack, we don't wait for the return
# value from the alert() call, since this is a
# blocking call, and waiting for this could cause
# problems.
needsResponse = False
if parentObj is self.__runner.dom and attribName == 'eval' and len(args) == 1 and isinstance(args[0], types.StringTypes):
# As another special hack, we make dom.eval() a
# special case, and map it directly into an eval()
# call. If the string begins with 'void ', we further
# assume we're not waiting for a response.
if args[0].startswith('void '):
needsResponse = False
result = self.__runner.scriptRequest('eval', parentObj, value = args[0], needsResponse = needsResponse)
else:
# This is a normal method call.
try:
result = self.__runner.scriptRequest('call', parentObj, propertyName = attribName, value = args, needsResponse = needsResponse)
except EnvironmentError:
# Problem on the call. Maybe no such method?
raise AttributeError
except EnvironmentError:
# Some odd problem on the call.
raise TypeError
return result
def __setattr__(self, name, value):
""" setattr will generally fail on method objects. """
raise AttributeError(name)
def __delattr__(self, name):
""" delattr will generally fail on method objects. """
raise AttributeError(name)
| bsd-3-clause | 8,757,385,156,152,744,000 | 39.931973 | 151 | 0.571713 | false |
jeremykohn/closure-linter | closure_linter/full_test.py | 11 | 3621 | #!/usr/bin/env python
#
# Copyright 2007 The Closure Linter Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Full regression-type (Medium) tests for gjslint.
Tests every error that can be thrown by gjslint. Based heavily on
devtools/javascript/gpylint/full_test.py
"""
__author__ = ('[email protected] (Robert Walker)',
'[email protected] (Andy Perelson)')
import os
import sys
import unittest
import gflags as flags
import unittest as googletest
from closure_linter import error_check
from closure_linter import errors
from closure_linter import runner
from closure_linter.common import filetestcase
_RESOURCE_PREFIX = 'closure_linter/testdata'
flags.FLAGS.strict = True
flags.FLAGS.custom_jsdoc_tags = ('customtag', 'requires')
flags.FLAGS.closurized_namespaces = ('goog', 'dummy')
flags.FLAGS.limited_doc_files = ('externs.js', 'dummy.js',
'limited_doc_checks.js')
flags.FLAGS.jslint_error = error_check.Rule.ALL
# List of files under testdata to test.
# We need to list files explicitly since pyglib can't list directories.
# TODO(user): Figure out how to list the directory.
_TEST_FILES = [
'all_js_wrapped.js',
'blank_lines.js',
'ends_with_block.js',
'empty_file.js',
'externs.js',
'externs_jsdoc.js',
'goog_scope.js',
'html_parse_error.html',
'indentation.js',
'interface.js',
'jsdoc.js',
'limited_doc_checks.js',
'minimal.js',
'other.js',
'provide_blank.js',
'provide_extra.js',
'provide_missing.js',
'require_alias.js',
'require_all_caps.js',
'require_blank.js',
'require_extra.js',
'require_function.js',
'require_function_missing.js',
'require_function_through_both.js',
'require_function_through_namespace.js',
'require_interface.js',
'require_interface_alias.js',
'require_interface_base.js',
'require_lower_case.js',
'require_missing.js',
'require_numeric.js',
'require_provide_blank.js',
'require_provide_missing.js',
'require_provide_ok.js',
'semicolon_missing.js',
'simple.html',
'spaces.js',
'tokenizer.js',
'unparseable.js',
'unused_local_variables.js',
'unused_private_members.js',
'utf8.html',
]
class GJsLintTestSuite(unittest.TestSuite):
"""Test suite to run a GJsLintTest for each of several files.
If sys.argv[1:] is non-empty, it is interpreted as a list of filenames in
testdata to test. Otherwise, _TEST_FILES is used.
"""
def __init__(self, tests=()):
unittest.TestSuite.__init__(self, tests)
argv = sys.argv and sys.argv[1:] or []
if argv:
test_files = argv
else:
test_files = _TEST_FILES
for test_file in test_files:
resource_path = os.path.join(_RESOURCE_PREFIX, test_file)
self.addTest(
filetestcase.AnnotatedFileTestCase(
resource_path,
runner.Run,
errors.ByName))
if __name__ == '__main__':
# Don't let main parse args; it happens in the TestSuite.
googletest.main(argv=sys.argv[0:1], defaultTest='GJsLintTestSuite')
| apache-2.0 | 3,204,844,735,459,870,700 | 28.92562 | 75 | 0.674399 | false |
mmolero/pcloudpy | pcloudpy/gui/MainWindowBase.py | 1 | 11800 | """
Template MainWindowBase.py
"""
#Author: Miguel Molero <[email protected]>
import sys
import os
from PyQt5.QtCore import *
from PyQt5.QtGui import *
from PyQt5.QtWidgets import *
from PyQt5.QtCore import pyqtSignal as Signal
import markdown2
import yaml
import pprint
#own components
from pcloudpy.gui.resources_rc import *
#from pcloudpy.gui.graphics.QVTKWidget import QVTKWidget
from pcloudpy.gui.AppObject import AppObject
from pcloudpy.gui.utils.qhelpers import *
from pcloudpy.gui.components.ViewWidget import ViewWidget
from pcloudpy.gui.components.TabViewWidget import TabViewWidget
from pcloudpy.gui.components.ToolboxesWidget import ToolBoxesWidget
from pcloudpy.gui.components.DatasetsWidget import DatasetsWidget
from pcloudpy.gui.components.ObjectInspectorWidget import ObjectInspectorWidget
from pcloudpy.gui.components.FilterWidget import FilterWidget
#from shell.PythonConsole import PythonConsole
#from shell.IPythonConsole import IPythonConsole
#from shell.CodeEdit import CodeEdit
NAME = "pcloudpy"
class Info(object):
version = "0.10"
date = "27-10-2015"
class MainWindowBase(QMainWindow):
"""
    Base class for the MainWindow object. A concrete MainWindow class should inherit from this base class to reuse its attributes and methods.
"""
def __init__(self, parent = None):
super(MainWindowBase, self).__init__(parent)
self.setLocale((QLocale(QLocale.English, QLocale.UnitedStates)))
self._software_name = NAME
self.App = AppObject()
self.init()
self.create_menus()
self.create_toolbars()
self.setup_docks()
self.setup_graphicsview()
self.setup_statusbar()
self.setup_connections()
self.init_settings()
self.init_toolboxes()
QTimer.singleShot(0,self.load_initial_file)
@property
def software_name(self):
return self._software_name
@software_name.setter
def software_name(self, name):
self._software_name = name
def init(self):
self.Info = Info()
self.dirty = False
self.reset = False
self.filename = None
self.recent_files = []
self.dir_path = os.getcwd()
self.setGeometry(100,100,900,600)
self.setMinimumSize(400,400)
self.setMaximumSize(2000,1500)
self.setWindowFlags(self.windowFlags())
self.setWindowTitle(self.software_name)
#Put here your init code
def set_title(self, fname=None):
title = os.path.basename(fname)
        self.setWindowTitle("%s:%s" % (self.software_name, title))
def load_initial_file(self):
settings = QSettings()
fname = settings.value("LastFile")
if fname and QFile.exists(fname):
self.load_file(fname)
def load_file(self, fname=None):
if fname is None:
action = self.sender()
if isinstance(action, QAction):
fname = action.data()
                if not self.ok_to_continue():
return
else:
return
if fname:
self.filename = None
self.add_recent_file(fname)
self.filename = fname
self.dirty = False
self.set_title(fname)
#Add More actions
#
#
def add_recent_file(self, fname):
if fname is None:
return
if not self.recentFiles.count(fname):
self.recentFiles.insert(0,fname)
while len(self.recentFiles)>9:
self.recentFiles.pop()
def create_menus(self):
self.menubar = self.menuBar()
file_menu = self.menubar.addMenu(self.tr('&File'))
help_menu = self.menubar.addMenu(self.tr("&Help"))
file_open_action = createAction(self, "&Open Dataset[s]", self.file_open)
file_open_action.setIcon(self.style().standardIcon(QStyle.SP_DirIcon))
help_about_action = createAction(self, "&About %s"%self._software_name, self.help_about, icon="pcloudpy.png")
addActions(file_menu, (file_open_action,))
addActions(help_menu, (help_about_action,))
def setup_connections(self):
#Main Window
self.workspaceLineEdit.textEdited.connect(self.editWorkSpace)
#self.code_edit.codeRequested.connect(self.console_widget.execute_code)
def setup_docks(self):
#Toolboxes
self.toolboxes_widget = ToolBoxesWidget()
self.toolboxes_dockwidget = QDockWidget(self.tr("Toolboxes"))
self.toolboxes_dockwidget.setObjectName("Toolboxes-Dock")
self.toolboxes_dockwidget.setWidget(self.toolboxes_widget)
self.toolboxes_dockwidget.setAllowedAreas(Qt.RightDockWidgetArea)
self.addDockWidget(Qt.RightDockWidgetArea, self.toolboxes_dockwidget)
#Datasets
self.datasets_widget = DatasetsWidget()
self.datasets_dockwidget = QDockWidget(self.tr("Datasets"))
self.datasets_dockwidget.setObjectName("Datasets-Dock")
self.datasets_dockwidget.setWidget(self.datasets_widget)
self.datasets_dockwidget.setAllowedAreas(Qt.LeftDockWidgetArea)
self.addDockWidget(Qt.LeftDockWidgetArea, self.datasets_dockwidget)
#Object Inspector
self.object_inspector_widget = ObjectInspectorWidget()
self.object_inspector_dockwidget = QDockWidget(self.tr("Object Inspector"))
self.object_inspector_dockwidget.setObjectName("Object-Inspector-Dock")
self.object_inspector_dockwidget.setWidget(self.object_inspector_widget)
self.object_inspector_dockwidget.setAllowedAreas(Qt.LeftDockWidgetArea)
self.addDockWidget(Qt.LeftDockWidgetArea, self.object_inspector_dockwidget)
#Filter Widget
self.filter_widget = FilterWidget()
self.filter_widget_dockwidget = QDockWidget(self.tr("Filter Setup"))
self.filter_widget_dockwidget.setObjectName("Filter-Setup-Dock")
self.filter_widget_dockwidget.setWidget(self.filter_widget)
self.filter_widget_dockwidget.setAllowedAreas(Qt.RightDockWidgetArea)
self.addDockWidget(Qt.RightDockWidgetArea, self.filter_widget_dockwidget)
#Console
self.tab_console = QTabWidget()
#self.console_widget = IPythonConsole(self, self.App)
#self.code_edit = CodeEdit()
#self.tab_console.addTab(self.console_widget, "Console")
#self.tab_console.addTab(self.code_edit, "Editor")
#self.console_widget_dockwidget = QDockWidget(self.tr("IPython"))
#self.console_widget_dockwidget.setObjectName("Console-Dock")
#self.console_widget_dockwidget.setWidget(self.tab_console)
#self.console_widget_dockwidget.setAllowedAreas(Qt.BottomDockWidgetArea)
#self.addDockWidget(Qt.BottomDockWidgetArea, self.console_widget_dockwidget)
def create_toolbars(self):
self.actionOpen_WorkSpace = createAction(self,"Set Workspace", self.setWorkSpace)
self.actionOpen_WorkSpace.setIcon(self.style().standardIcon(QStyle.SP_DirIcon))
self.first_toolbar = QToolBar(self)
self.first_toolbar.setObjectName("Workspace Toolbar")
self.first_toolbar.setAllowedAreas(Qt.TopToolBarArea | Qt.BottomToolBarArea)
self.workspaceLineEdit = QLineEdit()
self.workspaceLineEdit.setMinimumWidth(200)
self.first_toolbar.addWidget(QLabel("Workspace Dir"))
self.first_toolbar.addWidget(self.workspaceLineEdit)
self.first_toolbar.addAction(self.actionOpen_WorkSpace)
self.addToolBar(self.first_toolbar)
if self.dir_path is None:
self.dir_path = os.getcwd()
self.workspaceLineEdit.setText(self.dir_path)
self.addToolBarBreak()
def setup_graphicsview(self):
self.tab_view = TabViewWidget(self)
view = ViewWidget()
self.tab_view.addTab(view, "Layout #1")
self.setCentralWidget(self.tab_view)
#
self.datasets_widget.init_tree(view.model)
def setup_statusbar(self):
self.status = self.statusBar()
self.status.setSizeGripEnabled(False)
#Add more action
def setWorkSpace(self):
dir = QFileDialog.getExistingDirectory(None, self.tr("Set Workspace directory"), self.dir_path, QFileDialog.ShowDirsOnly | QFileDialog.DontResolveSymlinks)
if dir:
self.dir_path = dir
self.workspaceLineEdit.setText(self.dir_path)
def editWorkSpace(self):
if os.path.isdir(self.workspaceLineEdit.text()):
self.dir_path = self.workspaceLineEdit.text()
def init_settings(self):
settings = QSettings()
self.recentFiles = settings.value("RecentFiles")
size = settings.value("MainWindow/Size",QSize(900,600))
position = settings.value("MainWindow/Position",QPoint(50,50))
self.restoreState(settings.value("MainWindow/State"))
self.dir_path = settings.value("DirPath")
#Retrives more options
if self.recentFiles is None:
self.recentFiles = []
self.resize(size)
self.move(position)
#Add more actions
self.workspaceLineEdit.setText(self.dir_path)
def reset_settings(self):
settings = QSettings()
settings.clear()
self.reset = True
self.close()
def init_toolboxes(self):
if hasattr(sys, 'frozen'):
#http://stackoverflow.com/questions/14750997/load-txt-file-from-resources-in-python
fd = QFile(":/config_toolboxes.yaml")
if fd.open(QIODevice.ReadOnly | QFile.Text):
text = QTextStream(fd).readAll()
fd.close()
data = yaml.load(text)
else:
path = os.path.dirname(os.path.realpath(__file__))
with open(os.path.join(path,'resources', 'conf', 'config_toolboxes.yaml'), 'r') as f:
# use safe_load instead load
data = yaml.safe_load(f)
#pp = pprint.PrettyPrinter()
#pp.pprint(data)
self.toolboxes_widget.init_tree(data)
def ok_to_continue(self):
if self.dirty:
reply = QMessageBox.question(self,
"%s - Unsaved Changes"%self.softwareName,
"Save unsaved changes?",
QMessageBox.Yes|QMessageBox.No|QMessageBox.Cancel)
if reply == QMessageBox.Cancel:
return False
elif reply == QMessageBox.Yes:
self.file_save()
return True
def file_new(self):
pass
def file_open(self):
pass
def file_saveAs(self):
pass
def file_save(self):
pass
def help_about(self):
message = read_file(":/about.md").format(self.Info.version, self.Info.date)
html = markdown2.markdown(str(message))
QMessageBox.about(self, "About %s"%NAME, html)
def closeEvent(self, event):
if self.reset:
return
if self.ok_to_continue():
settings = QSettings()
filename = self.filename if self.filename is not None else None
settings.setValue("LastFile", filename)
recentFiles = self.recentFiles if self.recentFiles else None
settings.setValue("RecentFiles", recentFiles)
settings.setValue("MainWindow/Size", self.size())
settings.setValue("MainWindow/Position", self.pos())
settings.setValue("MainWindow/State", self.saveState())
settings.setValue("DirPath", self.dir_path)
#Set more options
else:
event.ignore()
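# Subclassing sketch (illustrative only, not part of the original module): an
# application window derives from MainWindowBase and overrides the file_* and
# help_* hooks defined above.
#
#   class MainWindow(MainWindowBase):
#       def __init__(self, parent=None):
#           super(MainWindow, self).__init__(parent)
#           self.software_name = "MyApp"
#
#       def file_open(self):
#           pass  # open datasets here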
if __name__=='__main__':
import sys
app = QApplication(sys.argv)
win = MainWindowBase()
win.show()
app.exec_()
| bsd-3-clause | -6,592,709,447,707,165,000 | 32.714286 | 163 | 0.644153 | false |
nataddrho/DigiCue-USB | Python3/src/venv/Lib/site-packages/serial/urlhandler/protocol_cp2110.py | 2 | 8540 | #! python
#
# Backend for Silicon Labs CP2110/4 HID-to-UART devices.
#
# This file is part of pySerial. https://github.com/pyserial/pyserial
# (C) 2001-2015 Chris Liechti <[email protected]>
# (C) 2019 Google LLC
#
# SPDX-License-Identifier: BSD-3-Clause
# This backend implements support for HID-to-UART devices manufactured
# by Silicon Labs and marketed as CP2110 and CP2114. The
# implementation is (mostly) OS-independent and in userland. It relies
# on cython-hidapi (https://github.com/trezor/cython-hidapi).
# The HID-to-UART protocol implemented by CP2110/4 is described in the
# AN434 document from Silicon Labs:
# https://www.silabs.com/documents/public/application-notes/AN434-CP2110-4-Interface-Specification.pdf
# TODO items:
# - rtscts support is configured for hardware flow control, but the
# signaling is missing (AN434 suggests this is done through GPIO).
# - Cancelling reads and writes is not supported.
# - Baudrate validation is not implemented, as it depends on model and configuration.
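# Usage sketch (the device path is an assumption; substitute the hidraw node
# or the BUS:DEVICE:ENDPOINT triple of the actual CP2110 adapter):
#
#   import serial
#   port = serial.serial_for_url('cp2110:///dev/hidraw0', baudrate=115200)
#   port.write(b'hello')
#   port.close()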
import struct
import threading
try:
import urlparse
except ImportError:
import urllib.parse as urlparse
try:
import Queue
except ImportError:
import queue as Queue
import hid # hidapi
import serial
from serial.serialutil import SerialBase, SerialException, PortNotOpenError, to_bytes, Timeout
# Report IDs and related constant
_REPORT_GETSET_UART_ENABLE = 0x41
_DISABLE_UART = 0x00
_ENABLE_UART = 0x01
_REPORT_SET_PURGE_FIFOS = 0x43
_PURGE_TX_FIFO = 0x01
_PURGE_RX_FIFO = 0x02
_REPORT_GETSET_UART_CONFIG = 0x50
_REPORT_SET_TRANSMIT_LINE_BREAK = 0x51
_REPORT_SET_STOP_LINE_BREAK = 0x52
class Serial(SerialBase):
    # This is not quite correct. AN434 specifies that the minimum
# baudrate is different between CP2110 and CP2114, and it's halved
# when using non-8-bit symbols.
BAUDRATES = (300, 375, 600, 1200, 1800, 2400, 4800, 9600, 19200,
38400, 57600, 115200, 230400, 460800, 500000, 576000,
921600, 1000000)
def __init__(self, *args, **kwargs):
self._hid_handle = None
self._read_buffer = None
self._thread = None
super(Serial, self).__init__(*args, **kwargs)
def open(self):
if self._port is None:
raise SerialException("Port must be configured before it can be used.")
if self.is_open:
raise SerialException("Port is already open.")
self._read_buffer = Queue.Queue()
self._hid_handle = hid.device()
try:
portpath = self.from_url(self.portstr)
self._hid_handle.open_path(portpath)
except OSError as msg:
raise SerialException(msg.errno, "could not open port {}: {}".format(self._port, msg))
try:
self._reconfigure_port()
except:
try:
self._hid_handle.close()
except:
pass
self._hid_handle = None
raise
else:
self.is_open = True
self._thread = threading.Thread(target=self._hid_read_loop)
self._thread.setDaemon(True)
self._thread.setName('pySerial CP2110 reader thread for {}'.format(self._port))
self._thread.start()
def from_url(self, url):
parts = urlparse.urlsplit(url)
if parts.scheme != "cp2110":
raise SerialException(
'expected a string in the forms '
'"cp2110:///dev/hidraw9" or "cp2110://0001:0023:00": '
'not starting with cp2110:// {{!r}}'.format(parts.scheme))
if parts.netloc: # cp2100://BUS:DEVICE:ENDPOINT, for libusb
return parts.netloc.encode('utf-8')
return parts.path.encode('utf-8')
def close(self):
self.is_open = False
if self._thread:
self._thread.join(1) # read timeout is 0.1
self._thread = None
self._hid_handle.close()
self._hid_handle = None
def _reconfigure_port(self):
parity_value = None
if self._parity == serial.PARITY_NONE:
parity_value = 0x00
elif self._parity == serial.PARITY_ODD:
parity_value = 0x01
elif self._parity == serial.PARITY_EVEN:
parity_value = 0x02
elif self._parity == serial.PARITY_MARK:
parity_value = 0x03
elif self._parity == serial.PARITY_SPACE:
parity_value = 0x04
else:
raise ValueError('Invalid parity: {!r}'.format(self._parity))
if self.rtscts:
flow_control_value = 0x01
else:
flow_control_value = 0x00
data_bits_value = None
if self._bytesize == 5:
data_bits_value = 0x00
elif self._bytesize == 6:
data_bits_value = 0x01
elif self._bytesize == 7:
data_bits_value = 0x02
elif self._bytesize == 8:
data_bits_value = 0x03
else:
raise ValueError('Invalid char len: {!r}'.format(self._bytesize))
stop_bits_value = None
if self._stopbits == serial.STOPBITS_ONE:
stop_bits_value = 0x00
elif self._stopbits == serial.STOPBITS_ONE_POINT_FIVE:
stop_bits_value = 0x01
elif self._stopbits == serial.STOPBITS_TWO:
stop_bits_value = 0x01
else:
raise ValueError('Invalid stop bit specification: {!r}'.format(self._stopbits))
configuration_report = struct.pack(
'>BLBBBB',
_REPORT_GETSET_UART_CONFIG,
self._baudrate,
parity_value,
flow_control_value,
data_bits_value,
stop_bits_value)
self._hid_handle.send_feature_report(configuration_report)
self._hid_handle.send_feature_report(
bytes((_REPORT_GETSET_UART_ENABLE, _ENABLE_UART)))
self._update_break_state()
@property
def in_waiting(self):
return self._read_buffer.qsize()
def reset_input_buffer(self):
if not self.is_open:
raise PortNotOpenError()
self._hid_handle.send_feature_report(
bytes((_REPORT_SET_PURGE_FIFOS, _PURGE_RX_FIFO)))
# empty read buffer
while self._read_buffer.qsize():
self._read_buffer.get(False)
def reset_output_buffer(self):
if not self.is_open:
raise PortNotOpenError()
self._hid_handle.send_feature_report(
bytes((_REPORT_SET_PURGE_FIFOS, _PURGE_TX_FIFO)))
def _update_break_state(self):
if not self._hid_handle:
raise PortNotOpenError()
if self._break_state:
self._hid_handle.send_feature_report(
bytes((_REPORT_SET_TRANSMIT_LINE_BREAK, 0)))
else:
# Note that while AN434 states "There are no data bytes in
# the payload other than the Report ID", either hidapi or
# Linux does not seem to send the report otherwise.
self._hid_handle.send_feature_report(
bytes((_REPORT_SET_STOP_LINE_BREAK, 0)))
def read(self, size=1):
if not self.is_open:
raise PortNotOpenError()
data = bytearray()
try:
timeout = Timeout(self._timeout)
while len(data) < size:
if self._thread is None:
raise SerialException('connection failed (reader thread died)')
buf = self._read_buffer.get(True, timeout.time_left())
if buf is None:
return bytes(data)
data += buf
if timeout.expired():
break
except Queue.Empty: # -> timeout
pass
return bytes(data)
def write(self, data):
if not self.is_open:
raise PortNotOpenError()
data = to_bytes(data)
tx_len = len(data)
while tx_len > 0:
to_be_sent = min(tx_len, 0x3F)
report = to_bytes([to_be_sent]) + data[:to_be_sent]
self._hid_handle.write(report)
data = data[to_be_sent:]
tx_len = len(data)
def _hid_read_loop(self):
try:
while self.is_open:
data = self._hid_handle.read(64, timeout_ms=100)
if not data:
continue
data_len = data.pop(0)
assert data_len == len(data)
self._read_buffer.put(bytearray(data))
finally:
self._thread = None
| mit | -5,908,419,889,556,478,000 | 32.100775 | 102 | 0.582553 | false |
jimsimon/sky_engine | build/config/mac/mac_app.py | 17 | 2776 | #!/usr/bin/python
# Copyright (c) 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import argparse
import os
import errno
import subprocess
import sys
PLUTIL = [
'/usr/bin/env',
'xcrun',
'plutil'
]
IBTOOL = [
'/usr/bin/env',
'xcrun',
'ibtool',
]
def MakeDirectories(path):
try:
os.makedirs(path)
except OSError as exc:
if exc.errno == errno.EEXIST and os.path.isdir(path):
return 0
else:
return -1
return 0
def ProcessInfoPlist(args):
output_plist_file = os.path.abspath(os.path.join(args.output, 'Info.plist'))
return subprocess.check_call( PLUTIL + [
'-convert',
'binary1',
'-o',
output_plist_file,
'--',
args.input,
])
def ProcessNIB(args):
output_nib_file = os.path.join(os.path.abspath(args.output),
"%s.nib" % os.path.splitext(os.path.basename(args.input))[0])
return subprocess.check_call(IBTOOL + [
'--module',
args.module,
'--auto-activate-custom-fonts',
'--target-device',
'mac',
'--compile',
output_nib_file,
os.path.abspath(args.input),
])
def GenerateProjectStructure(args):
application_path = os.path.join( args.dir, args.name + ".app", "Contents" )
return MakeDirectories( application_path )
def Main():
parser = argparse.ArgumentParser(description='A script that aids in '
                                               'the creation of a Mac application')
subparsers = parser.add_subparsers()
# Plist Parser
plist_parser = subparsers.add_parser('plist',
help='Process the Info.plist')
plist_parser.set_defaults(func=ProcessInfoPlist)
plist_parser.add_argument('-i', dest='input', help='The input plist path')
plist_parser.add_argument('-o', dest='output', help='The output plist dir')
# NIB Parser
plist_parser = subparsers.add_parser('nib',
help='Process a NIB file')
plist_parser.set_defaults(func=ProcessNIB)
plist_parser.add_argument('-i', dest='input', help='The input nib path')
plist_parser.add_argument('-o', dest='output', help='The output nib dir')
plist_parser.add_argument('-m', dest='module', help='The module name')
# Directory Structure Parser
dir_struct_parser = subparsers.add_parser('structure',
    help='Creates the directory of a Mac application')
dir_struct_parser.set_defaults(func=GenerateProjectStructure)
dir_struct_parser.add_argument('-d', dest='dir', help='Out directory')
dir_struct_parser.add_argument('-n', dest='name', help='App name')
# Engage!
args = parser.parse_args()
return args.func(args)
if __name__ == '__main__':
sys.exit(Main())
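# Example invocations (paths and names are placeholders):
#
#   mac_app.py plist -i Info.plist -o out/MyApp.app/Contents
#   mac_app.py nib -i MainMenu.xib -o out/MyApp.app/Contents/Resources -m MyModule
#   mac_app.py structure -d out -n MyApp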
| bsd-3-clause | -6,065,640,667,594,368,000 | 23.566372 | 78 | 0.64121 | false |
narurien/ganeti-ceph | lib/http/__init__.py | 5 | 28293 | #
#
# Copyright (C) 2007, 2008, 2010, 2012 Google Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA.
"""HTTP module.
"""
import logging
import mimetools
import OpenSSL
import select
import socket
import errno
from cStringIO import StringIO
from ganeti import constants
from ganeti import utils
HTTP_GANETI_VERSION = "Ganeti %s" % constants.RELEASE_VERSION
HTTP_OK = 200
HTTP_NO_CONTENT = 204
HTTP_NOT_MODIFIED = 304
HTTP_0_9 = "HTTP/0.9"
HTTP_1_0 = "HTTP/1.0"
HTTP_1_1 = "HTTP/1.1"
HTTP_GET = "GET"
HTTP_HEAD = "HEAD"
HTTP_POST = "POST"
HTTP_PUT = "PUT"
HTTP_DELETE = "DELETE"
HTTP_ETAG = "ETag"
HTTP_HOST = "Host"
HTTP_SERVER = "Server"
HTTP_DATE = "Date"
HTTP_USER_AGENT = "User-Agent"
HTTP_CONTENT_TYPE = "Content-Type"
HTTP_CONTENT_LENGTH = "Content-Length"
HTTP_CONNECTION = "Connection"
HTTP_KEEP_ALIVE = "Keep-Alive"
HTTP_WWW_AUTHENTICATE = "WWW-Authenticate"
HTTP_AUTHORIZATION = "Authorization"
HTTP_AUTHENTICATION_INFO = "Authentication-Info"
HTTP_ALLOW = "Allow"
HTTP_APP_OCTET_STREAM = "application/octet-stream"
HTTP_APP_JSON = "application/json"
_SSL_UNEXPECTED_EOF = "Unexpected EOF"
# Socket operations
(SOCKOP_SEND,
SOCKOP_RECV,
SOCKOP_SHUTDOWN,
SOCKOP_HANDSHAKE) = range(4)
# send/receive quantum
SOCK_BUF_SIZE = 32768
class HttpError(Exception):
"""Internal exception for HTTP errors.
This should only be used for internal error reporting.
"""
class HttpConnectionClosed(Exception):
"""Internal exception for a closed connection.
This should only be used for internal error reporting. Only use
it if there's no other way to report this condition.
"""
class HttpSessionHandshakeUnexpectedEOF(HttpError):
"""Internal exception for errors during SSL handshake.
This should only be used for internal error reporting.
"""
class HttpSocketTimeout(Exception):
"""Internal exception for socket timeouts.
This should only be used for internal error reporting.
"""
class HttpException(Exception):
code = None
message = None
def __init__(self, message=None, headers=None):
Exception.__init__(self)
self.message = message
self.headers = headers
class HttpBadRequest(HttpException):
"""400 Bad Request
RFC2616, 10.4.1: The request could not be understood by the server
due to malformed syntax. The client SHOULD NOT repeat the request
without modifications.
"""
code = 400
class HttpUnauthorized(HttpException):
"""401 Unauthorized
RFC2616, section 10.4.2: The request requires user
authentication. The response MUST include a WWW-Authenticate header
field (section 14.47) containing a challenge applicable to the
requested resource.
"""
code = 401
class HttpForbidden(HttpException):
"""403 Forbidden
RFC2616, 10.4.4: The server understood the request, but is refusing
to fulfill it. Authorization will not help and the request SHOULD
NOT be repeated.
"""
code = 403
class HttpNotFound(HttpException):
"""404 Not Found
RFC2616, 10.4.5: The server has not found anything matching the
Request-URI. No indication is given of whether the condition is
temporary or permanent.
"""
code = 404
class HttpMethodNotAllowed(HttpException):
"""405 Method Not Allowed
RFC2616, 10.4.6: The method specified in the Request-Line is not
allowed for the resource identified by the Request-URI. The response
MUST include an Allow header containing a list of valid methods for
the requested resource.
"""
code = 405
class HttpNotAcceptable(HttpException):
"""406 Not Acceptable
RFC2616, 10.4.7: The resource identified by the request is only capable of
generating response entities which have content characteristics not
acceptable according to the accept headers sent in the request.
"""
code = 406
class HttpRequestTimeout(HttpException):
"""408 Request Timeout
RFC2616, 10.4.9: The client did not produce a request within the
time that the server was prepared to wait. The client MAY repeat the
request without modifications at any later time.
"""
code = 408
class HttpConflict(HttpException):
"""409 Conflict
RFC2616, 10.4.10: The request could not be completed due to a
conflict with the current state of the resource. This code is only
allowed in situations where it is expected that the user might be
able to resolve the conflict and resubmit the request.
"""
code = 409
class HttpGone(HttpException):
"""410 Gone
RFC2616, 10.4.11: The requested resource is no longer available at
the server and no forwarding address is known. This condition is
expected to be considered permanent.
"""
code = 410
class HttpLengthRequired(HttpException):
"""411 Length Required
RFC2616, 10.4.12: The server refuses to accept the request without a
defined Content-Length. The client MAY repeat the request if it adds
a valid Content-Length header field containing the length of the
message-body in the request message.
"""
code = 411
class HttpPreconditionFailed(HttpException):
"""412 Precondition Failed
RFC2616, 10.4.13: The precondition given in one or more of the
request-header fields evaluated to false when it was tested on the
server.
"""
code = 412
class HttpUnsupportedMediaType(HttpException):
"""415 Unsupported Media Type
RFC2616, 10.4.16: The server is refusing to service the request because the
entity of the request is in a format not supported by the requested resource
for the requested method.
"""
code = 415
class HttpInternalServerError(HttpException):
"""500 Internal Server Error
RFC2616, 10.5.1: The server encountered an unexpected condition
which prevented it from fulfilling the request.
"""
code = 500
class HttpNotImplemented(HttpException):
"""501 Not Implemented
RFC2616, 10.5.2: The server does not support the functionality
required to fulfill the request.
"""
code = 501
class HttpBadGateway(HttpException):
"""502 Bad Gateway
RFC2616, 10.5.3: The server, while acting as a gateway or proxy,
received an invalid response from the upstream server it accessed in
attempting to fulfill the request.
"""
code = 502
class HttpServiceUnavailable(HttpException):
"""503 Service Unavailable
RFC2616, 10.5.4: The server is currently unable to handle the
request due to a temporary overloading or maintenance of the server.
"""
code = 503
class HttpGatewayTimeout(HttpException):
"""504 Gateway Timeout
RFC2616, 10.5.5: The server, while acting as a gateway or proxy, did
not receive a timely response from the upstream server specified by
the URI (e.g. HTTP, FTP, LDAP) or some other auxiliary server
(e.g. DNS) it needed to access in attempting to complete the
request.
"""
code = 504
class HttpVersionNotSupported(HttpException):
"""505 HTTP Version Not Supported
RFC2616, 10.5.6: The server does not support, or refuses to support,
the HTTP protocol version that was used in the request message.
"""
code = 505
def ParseHeaders(buf):
"""Parses HTTP headers.
@note: This is just a trivial wrapper around C{mimetools.Message}
"""
return mimetools.Message(buf, 0)
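# Illustrative sketch (added commentary, not part of the original module):
# ParseHeaders simply defers to mimetools, e.g.
#
#   hdrs = ParseHeaders(StringIO("Content-Length: 10\r\nConnection: close\r\n\r\n"))
#   hdrs.get("Content-Length")  # -> "10"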
def SocketOperation(sock, op, arg1, timeout):
"""Wrapper around socket functions.
This function abstracts error handling for socket operations, especially
for the complicated interaction with OpenSSL.
@type sock: socket
@param sock: Socket for the operation
@type op: int
@param op: Operation to execute (SOCKOP_* constants)
@type arg1: any
@param arg1: Parameter for function (if needed)
@type timeout: None or float
@param timeout: Timeout in seconds or None
@return: Return value of socket function
"""
# TODO: event_poll/event_check/override
if op in (SOCKOP_SEND, SOCKOP_HANDSHAKE):
event_poll = select.POLLOUT
elif op == SOCKOP_RECV:
event_poll = select.POLLIN
elif op == SOCKOP_SHUTDOWN:
event_poll = None
# The timeout is only used when OpenSSL requests polling for a condition.
# It is not advisable to have no timeout for shutdown.
assert timeout
else:
raise AssertionError("Invalid socket operation")
# Handshake is only supported by SSL sockets
if (op == SOCKOP_HANDSHAKE and
not isinstance(sock, OpenSSL.SSL.ConnectionType)):
return
# No override by default
event_override = 0
while True:
# Poll only for certain operations and when asked for by an override
if event_override or op in (SOCKOP_SEND, SOCKOP_RECV, SOCKOP_HANDSHAKE):
if event_override:
wait_for_event = event_override
else:
wait_for_event = event_poll
event = utils.WaitForFdCondition(sock, wait_for_event, timeout)
if event is None:
raise HttpSocketTimeout()
if event & (select.POLLNVAL | select.POLLHUP | select.POLLERR):
# Let the socket functions handle these
break
if not event & wait_for_event:
continue
# Reset override
event_override = 0
try:
try:
if op == SOCKOP_SEND:
return sock.send(arg1)
elif op == SOCKOP_RECV:
return sock.recv(arg1)
elif op == SOCKOP_SHUTDOWN:
if isinstance(sock, OpenSSL.SSL.ConnectionType):
# PyOpenSSL's shutdown() doesn't take arguments
return sock.shutdown()
else:
return sock.shutdown(arg1)
elif op == SOCKOP_HANDSHAKE:
return sock.do_handshake()
except OpenSSL.SSL.WantWriteError:
# OpenSSL wants to write, poll for POLLOUT
event_override = select.POLLOUT
continue
except OpenSSL.SSL.WantReadError:
# OpenSSL wants to read, poll for POLLIN
event_override = select.POLLIN | select.POLLPRI
continue
except OpenSSL.SSL.WantX509LookupError:
continue
except OpenSSL.SSL.ZeroReturnError, err:
# SSL Connection has been closed. In SSL 3.0 and TLS 1.0, this only
# occurs if a closure alert has occurred in the protocol, i.e. the
# connection has been closed cleanly. Note that this does not
# necessarily mean that the transport layer (e.g. a socket) has been
# closed.
if op == SOCKOP_SEND:
# Can happen during a renegotiation
raise HttpConnectionClosed(err.args)
elif op == SOCKOP_RECV:
return ""
# SSL_shutdown shouldn't return SSL_ERROR_ZERO_RETURN
raise socket.error(err.args)
except OpenSSL.SSL.SysCallError, err:
if op == SOCKOP_SEND:
# arg1 is the data when writing
if err.args and err.args[0] == -1 and arg1 == "":
# errors when writing empty strings are expected
# and can be ignored
return 0
if err.args == (-1, _SSL_UNEXPECTED_EOF):
if op == SOCKOP_RECV:
return ""
elif op == SOCKOP_HANDSHAKE:
# Can happen if peer disconnects directly after the connection is
# opened.
raise HttpSessionHandshakeUnexpectedEOF(err.args)
raise socket.error(err.args)
except OpenSSL.SSL.Error, err:
raise socket.error(err.args)
except socket.error, err:
if err.args and err.args[0] == errno.EAGAIN:
# Ignore EAGAIN
continue
raise
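# Usage sketch (added commentary, not part of the original module): a read on
# an already connected socket with a 30 second timeout could look like
#
#   data = SocketOperation(sock, SOCKOP_RECV, SOCK_BUF_SIZE, 30.0)
#
# where an empty return value signals EOF and HttpSocketTimeout is raised if
# the peer sends nothing before the timeout expires.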
def ShutdownConnection(sock, close_timeout, write_timeout, msgreader, force):
"""Closes the connection.
@type sock: socket
@param sock: Socket to be shut down
@type close_timeout: float
@param close_timeout: How long to wait for the peer to close
the connection
@type write_timeout: float
@param write_timeout: Write timeout for shutdown
@type msgreader: http.HttpMessageReader
@param msgreader: Request message reader, used to determine whether
peer should close connection
@type force: bool
@param force: Whether to forcibly close the connection without
waiting for peer
"""
#print msgreader.peer_will_close, force
if msgreader and msgreader.peer_will_close and not force:
# Wait for peer to close
try:
# Check whether it's actually closed
if not SocketOperation(sock, SOCKOP_RECV, 1, close_timeout):
return
except (socket.error, HttpError, HttpSocketTimeout):
# Ignore errors at this stage
pass
# Close the connection from our side
try:
# We don't care about the return value, see NOTES in SSL_shutdown(3).
SocketOperation(sock, SOCKOP_SHUTDOWN, socket.SHUT_RDWR,
write_timeout)
except HttpSocketTimeout:
raise HttpError("Timeout while shutting down connection")
except socket.error, err:
# Ignore ENOTCONN
if not (err.args and err.args[0] == errno.ENOTCONN):
raise HttpError("Error while shutting down connection: %s" % err)
def Handshake(sock, write_timeout):
"""Shakes peer's hands.
@type sock: socket
@param sock: Socket to be shut down
@type write_timeout: float
@param write_timeout: Write timeout for handshake
"""
try:
return SocketOperation(sock, SOCKOP_HANDSHAKE, None, write_timeout)
except HttpSocketTimeout:
raise HttpError("Timeout during SSL handshake")
except socket.error, err:
raise HttpError("Error in SSL handshake: %s" % err)
class HttpSslParams(object):
"""Data class for SSL key and certificate.
"""
def __init__(self, ssl_key_path, ssl_cert_path):
"""Initializes this class.
@type ssl_key_path: string
@param ssl_key_path: Path to file containing SSL key in PEM format
@type ssl_cert_path: string
@param ssl_cert_path: Path to file containing SSL certificate
in PEM format
"""
self.ssl_key_pem = utils.ReadFile(ssl_key_path)
self.ssl_cert_pem = utils.ReadFile(ssl_cert_path)
self.ssl_cert_path = ssl_cert_path
def GetKey(self):
return OpenSSL.crypto.load_privatekey(OpenSSL.crypto.FILETYPE_PEM,
self.ssl_key_pem)
def GetCertificate(self):
return OpenSSL.crypto.load_certificate(OpenSSL.crypto.FILETYPE_PEM,
self.ssl_cert_pem)
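# Hedged example (added commentary; the paths are placeholders): constructing
# the SSL parameters and extracting the OpenSSL objects typically looks like
#
#   ssl_params = HttpSslParams("/path/to/server.key", "/path/to/server.pem")
#   key = ssl_params.GetKey()
#   cert = ssl_params.GetCertificate()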
class HttpBase(object):
"""Base class for HTTP server and client.
"""
def __init__(self):
self.using_ssl = None
self._ssl_params = None
self._ssl_key = None
self._ssl_cert = None
def _CreateSocket(self, ssl_params, ssl_verify_peer, family):
"""Creates a TCP socket and initializes SSL if needed.
@type ssl_params: HttpSslParams
@param ssl_params: SSL key and certificate
@type ssl_verify_peer: bool
@param ssl_verify_peer: Whether to require client certificate
and compare it with our certificate
@type family: int
@param family: socket.AF_INET | socket.AF_INET6
"""
assert family in (socket.AF_INET, socket.AF_INET6)
self._ssl_params = ssl_params
sock = socket.socket(family, socket.SOCK_STREAM)
# Should we enable SSL?
self.using_ssl = ssl_params is not None
if not self.using_ssl:
return sock
self._ssl_key = ssl_params.GetKey()
self._ssl_cert = ssl_params.GetCertificate()
ctx = OpenSSL.SSL.Context(OpenSSL.SSL.SSLv23_METHOD)
ctx.set_options(OpenSSL.SSL.OP_NO_SSLv2)
ciphers = self.GetSslCiphers()
logging.debug("Setting SSL cipher string %s", ciphers)
ctx.set_cipher_list(ciphers)
ctx.use_privatekey(self._ssl_key)
ctx.use_certificate(self._ssl_cert)
ctx.check_privatekey()
if ssl_verify_peer:
ctx.set_verify(OpenSSL.SSL.VERIFY_PEER |
OpenSSL.SSL.VERIFY_FAIL_IF_NO_PEER_CERT,
self._SSLVerifyCallback)
# Also add our certificate as a trusted CA to be sent to the client.
# This is required at least for GnuTLS clients to work.
try:
# This will fail for PyOpenssl versions before 0.10
ctx.add_client_ca(self._ssl_cert)
except AttributeError:
# Fall back to letting OpenSSL read the certificate file directly.
ctx.load_client_ca(ssl_params.ssl_cert_path)
return OpenSSL.SSL.Connection(ctx, sock)
def GetSslCiphers(self): # pylint: disable=R0201
"""Returns the ciphers string for SSL.
"""
return constants.OPENSSL_CIPHERS
def _SSLVerifyCallback(self, conn, cert, errnum, errdepth, ok):
"""Verify the certificate provided by the peer
We only compare fingerprints. The client must use the same certificate as
we do on our side.
"""
# some parameters are unused, but this is the API
# pylint: disable=W0613
assert self._ssl_params, "SSL not initialized"
return (self._ssl_cert.digest("sha1") == cert.digest("sha1") and
self._ssl_cert.digest("md5") == cert.digest("md5"))
class HttpMessage(object):
"""Data structure for HTTP message.
"""
def __init__(self):
self.start_line = None
self.headers = None
self.body = None
class HttpClientToServerStartLine(object):
"""Data structure for HTTP request start line.
"""
def __init__(self, method, path, version):
self.method = method
self.path = path
self.version = version
def __str__(self):
return "%s %s %s" % (self.method, self.path, self.version)
class HttpServerToClientStartLine(object):
"""Data structure for HTTP response start line.
"""
def __init__(self, version, code, reason):
self.version = version
self.code = code
self.reason = reason
def __str__(self):
return "%s %s %s" % (self.version, self.code, self.reason)
class HttpMessageWriter(object):
"""Writes an HTTP message to a socket.
"""
def __init__(self, sock, msg, write_timeout):
"""Initializes this class and writes an HTTP message to a socket.
@type sock: socket
@param sock: Socket to be written to
@type msg: http.HttpMessage
@param msg: HTTP message to be written
@type write_timeout: float
@param write_timeout: Write timeout for socket
"""
self._msg = msg
self._PrepareMessage()
buf = self._FormatMessage()
pos = 0
end = len(buf)
while pos < end:
# Send only SOCK_BUF_SIZE bytes at a time
data = buf[pos:(pos + SOCK_BUF_SIZE)]
sent = SocketOperation(sock, SOCKOP_SEND, data, write_timeout)
# Remove sent bytes
pos += sent
assert pos == end, "Message wasn't sent completely"
def _PrepareMessage(self):
"""Prepares the HTTP message by setting mandatory headers.
"""
# RFC2616, section 4.3: "The presence of a message-body in a request is
# signaled by the inclusion of a Content-Length or Transfer-Encoding header
# field in the request's message-headers."
if self._msg.body:
self._msg.headers[HTTP_CONTENT_LENGTH] = len(self._msg.body)
def _FormatMessage(self):
"""Serializes the HTTP message into a string.
"""
buf = StringIO()
# Add start line
buf.write(str(self._msg.start_line))
buf.write("\r\n")
# Add headers
if self._msg.start_line.version != HTTP_0_9:
for name, value in self._msg.headers.iteritems():
buf.write("%s: %s\r\n" % (name, value))
buf.write("\r\n")
# Add message body if needed
if self.HasMessageBody():
buf.write(self._msg.body)
elif self._msg.body:
logging.warning("Ignoring message body")
return buf.getvalue()
def HasMessageBody(self):
"""Checks whether the HTTP message contains a body.
Can be overridden by subclasses.
"""
return bool(self._msg.body)
class HttpMessageReader(object):
"""Reads HTTP message from socket.
"""
# Length limits
START_LINE_LENGTH_MAX = None
HEADER_LENGTH_MAX = None
# Parser state machine
PS_START_LINE = "start-line"
PS_HEADERS = "headers"
PS_BODY = "entity-body"
PS_COMPLETE = "complete"
def __init__(self, sock, msg, read_timeout):
"""Reads an HTTP message from a socket.
@type sock: socket
@param sock: Socket to be read from
@type msg: http.HttpMessage
@param msg: Object for the read message
@type read_timeout: float
@param read_timeout: Read timeout for socket
"""
self.sock = sock
self.msg = msg
self.start_line_buffer = None
self.header_buffer = StringIO()
self.body_buffer = StringIO()
self.parser_status = self.PS_START_LINE
self.content_length = None
self.peer_will_close = None
buf = ""
eof = False
while self.parser_status != self.PS_COMPLETE:
# TODO: Don't read more than necessary (Content-Length), otherwise
# data might be lost and/or an error could occur
data = SocketOperation(sock, SOCKOP_RECV, SOCK_BUF_SIZE, read_timeout)
if data:
buf += data
else:
eof = True
# Do some parsing and error checking while more data arrives
buf = self._ContinueParsing(buf, eof)
# Must be done only after the buffer has been evaluated
# TODO: Content-Length < len(data read) and connection closed
if (eof and
self.parser_status in (self.PS_START_LINE,
self.PS_HEADERS)):
raise HttpError("Connection closed prematurely")
# Parse rest
buf = self._ContinueParsing(buf, True)
assert self.parser_status == self.PS_COMPLETE
assert not buf, "Parser didn't read full response"
# Body is complete
msg.body = self.body_buffer.getvalue()
def _ContinueParsing(self, buf, eof):
"""Main function for HTTP message state machine.
@type buf: string
@param buf: Receive buffer
@type eof: bool
@param eof: Whether we've reached EOF on the socket
@rtype: string
@return: Updated receive buffer
"""
# TODO: Use offset instead of slicing when possible
if self.parser_status == self.PS_START_LINE:
# Expect start line
while True:
idx = buf.find("\r\n")
# RFC2616, section 4.1: "In the interest of robustness, servers SHOULD
# ignore any empty line(s) received where a Request-Line is expected.
# In other words, if the server is reading the protocol stream at the
# beginning of a message and receives a CRLF first, it should ignore
# the CRLF."
if idx == 0:
# TODO: Limit number of CRLFs/empty lines for safety?
buf = buf[2:]
continue
if idx > 0:
self.start_line_buffer = buf[:idx]
self._CheckStartLineLength(len(self.start_line_buffer))
# Remove status line, including CRLF
buf = buf[idx + 2:]
self.msg.start_line = self.ParseStartLine(self.start_line_buffer)
self.parser_status = self.PS_HEADERS
else:
# Check whether incoming data is getting too large, otherwise we just
# fill our read buffer.
self._CheckStartLineLength(len(buf))
break
# TODO: Handle messages without headers
if self.parser_status == self.PS_HEADERS:
# Wait for header end
idx = buf.find("\r\n\r\n")
if idx >= 0:
self.header_buffer.write(buf[:idx + 2])
self._CheckHeaderLength(self.header_buffer.tell())
# Remove headers, including CRLF
buf = buf[idx + 4:]
self._ParseHeaders()
self.parser_status = self.PS_BODY
else:
# Check whether incoming data is getting too large, otherwise we just
# fill our read buffer.
self._CheckHeaderLength(len(buf))
if self.parser_status == self.PS_BODY:
# TODO: Implement max size for body_buffer
self.body_buffer.write(buf)
buf = ""
# Check whether we've read everything
#
# RFC2616, section 4.4: "When a message-body is included with a message,
# the transfer-length of that body is determined by one of the following
# [...] 5. By the server closing the connection. (Closing the connection
# cannot be used to indicate the end of a request body, since that would
# leave no possibility for the server to send back a response.)"
#
# TODO: Error when buffer length > Content-Length header
if (eof or
self.content_length is None or
(self.content_length is not None and
self.body_buffer.tell() >= self.content_length)):
self.parser_status = self.PS_COMPLETE
return buf
def _CheckStartLineLength(self, length):
"""Limits the start line buffer size.
@type length: int
@param length: Buffer size
"""
if (self.START_LINE_LENGTH_MAX is not None and
length > self.START_LINE_LENGTH_MAX):
raise HttpError("Start line longer than %d chars" %
self.START_LINE_LENGTH_MAX)
def _CheckHeaderLength(self, length):
"""Limits the header buffer size.
@type length: int
@param length: Buffer size
"""
if (self.HEADER_LENGTH_MAX is not None and
length > self.HEADER_LENGTH_MAX):
raise HttpError("Headers longer than %d chars" % self.HEADER_LENGTH_MAX)
def ParseStartLine(self, start_line):
"""Parses the start line of a message.
Must be overridden by subclass.
@type start_line: string
@param start_line: Start line string
"""
raise NotImplementedError()
def _WillPeerCloseConnection(self):
"""Evaluate whether peer will close the connection.
@rtype: bool
@return: Whether peer will close the connection
"""
# RFC2616, section 14.10: "HTTP/1.1 defines the "close" connection option
# for the sender to signal that the connection will be closed after
# completion of the response. For example,
#
# Connection: close
#
# in either the request or the response header fields indicates that the
# connection SHOULD NOT be considered `persistent' (section 8.1) after the
# current request/response is complete."
hdr_connection = self.msg.headers.get(HTTP_CONNECTION, None)
if hdr_connection:
hdr_connection = hdr_connection.lower()
# An HTTP/1.1 server is assumed to stay open unless explicitly closed.
if self.msg.start_line.version == HTTP_1_1:
return (hdr_connection and "close" in hdr_connection)
# Some HTTP/1.0 implementations have support for persistent connections,
# using rules different than HTTP/1.1.
# For older HTTP, Keep-Alive indicates persistent connection.
if self.msg.headers.get(HTTP_KEEP_ALIVE):
return False
# At least Akamai returns a "Connection: Keep-Alive" header, which was
# supposed to be sent by the client.
if hdr_connection and "keep-alive" in hdr_connection:
return False
return True
def _ParseHeaders(self):
"""Parses the headers.
This function also adjusts internal variables based on header values.
RFC2616, section 4.3: The presence of a message-body in a request is
signaled by the inclusion of a Content-Length or Transfer-Encoding header
field in the request's message-headers.
"""
# Parse headers
self.header_buffer.seek(0, 0)
self.msg.headers = ParseHeaders(self.header_buffer)
self.peer_will_close = self._WillPeerCloseConnection()
# Do we have a Content-Length header?
hdr_content_length = self.msg.headers.get(HTTP_CONTENT_LENGTH, None)
if hdr_content_length:
try:
self.content_length = int(hdr_content_length)
except (TypeError, ValueError):
self.content_length = None
if self.content_length is not None and self.content_length < 0:
self.content_length = None
# if the connection remains open and a content-length was not provided,
# then assume that the connection WILL close.
if self.content_length is None:
self.peer_will_close = True
| gpl-2.0 | 7,959,725,630,880,990,000 | 26.902367 | 79 | 0.675149 | false |
26huitailang/bsawf | cli/commands/cmd_loc.py | 2 | 1137 | from subprocess import check_output
import click
def count_locs(file_type, comment_pattern):
"""
    Count the lines of code for a given file type, skipping comment and blank lines.
    :param file_type: Which file type will be searched?
    :type file_type: str
    :param comment_pattern: Escaped characters that are comments
    :type comment_pattern: str
:return: str
"""
find = "find . -name '*.{0}' -print0".format(file_type)
sed_pattern = "'/^\s*{0}/d;/^\s*$/d'".format(comment_pattern)
cmd = "{0} | xargs -0 sed {1} | wc -l".format(find, sed_pattern)
return check_output(cmd, shell=True).decode('utf-8').replace('\n', '')
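# Illustrative example (added; the count shown is hypothetical):
#
#   count_locs('py', '#')   # -> e.g. '1234' non-comment, non-blank Python lines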
@click.command()
def cli():
"""
Count lines of code in the project.
:return: None
"""
file_types = (
['Python', 'py', '#'],
['HTML', 'html', '<!--'],
['CSS', 'css', '\/\*'],
['JS', 'js', '\/\/']
)
click.echo('Lines of code\n-------------')
for file_type in file_types:
click.echo("{0}: {1}".format(file_type[0], count_locs(file_type[1],
file_type[2])))
return None
| gpl-2.0 | 7,817,523,204,550,687,000 | 24.840909 | 77 | 0.524186 | false |
abadger/ansible-modules-core | network/netvisor/pn_vrouter.py | 19 | 13209 | #!/usr/bin/python
""" PN CLI vrouter-create/vrouter-delete/vrouter-modify """
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
import shlex
DOCUMENTATION = """
---
module: pn_vrouter
author: "Pluribus Networks (@amitsi)"
version_added: "2.2"
version: 1
short_description: CLI command to create/delete/modify a vrouter.
description:
- Execute vrouter-create, vrouter-delete, vrouter-modify command.
- Each fabric, cluster, standalone switch, or virtual network (VNET) can
provide its tenants with a virtual router (vRouter) service that forwards
traffic between networks and implements Layer 3 protocols.
- C(vrouter-create) creates a new vRouter service.
- C(vrouter-delete) deletes a vRouter service.
- C(vrouter-modify) modifies a vRouter service.
options:
pn_cliusername:
description:
- Provide login username if user is not root.
required: False
pn_clipassword:
description:
- Provide login password if user is not root.
required: False
pn_cliswitch:
description:
- Target switch(es) to run the CLI on.
required: False
state:
description:
- State the action to perform. Use 'present' to create vrouter,
'absent' to delete vrouter and 'update' to modify vrouter.
required: True
choices: ['present', 'absent', 'update']
pn_name:
description:
- Specify the name of the vRouter.
required: true
pn_vnet:
description:
- Specify the name of the VNET.
- Required for vrouter-create.
pn_service_type:
description:
- Specify if the vRouter is a dedicated or shared VNET service.
choices: ['dedicated', 'shared']
pn_service_state:
description:
- Specify to enable or disable vRouter service.
choices: ['enable', 'disable']
pn_router_type:
description:
- Specify if the vRouter uses software or hardware.
- Note that if you specify hardware as router type, you cannot assign IP
addresses using DHCP. You must specify a static IP address.
choices: ['hardware', 'software']
pn_hw_vrrp_id:
description:
- Specifies the VRRP ID for a hardware vrouter.
pn_router_id:
description:
- Specify the vRouter IP address.
pn_bgp_as:
description:
- Specify the Autonomous System Number(ASN) if the vRouter runs Border
Gateway Protocol(BGP).
pn_bgp_redistribute:
description:
- Specify how BGP routes are redistributed.
choices: ['static', 'connected', 'rip', 'ospf']
pn_bgp_max_paths:
description:
- Specify the maximum number of paths for BGP. This is a number between
1 and 255 or 0 to unset.
pn_bgp_options:
description:
- Specify other BGP options as a whitespaces separated string within
single quotes ''.
pn_rip_redistribute:
description:
- Specify how RIP routes are redistributed.
choices: ['static', 'connected', 'ospf', 'bgp']
pn_ospf_redistribute:
description:
- Specify how OSPF routes are redistributed.
choices: ['static', 'connected', 'bgp', 'rip']
pn_ospf_options:
description:
- Specify other OSPF options as a whitespaces separated string within
single quotes ''.
"""
EXAMPLES = """
- name: create vrouter
pn_vrouter:
state: 'present'
pn_name: 'ansible-vrouter'
pn_vnet: 'ansible-fab-global'
pn_router_id: 208.74.182.1
- name: delete vrouter
pn_vrouter:
state: 'absent'
pn_name: 'ansible-vrouter'
"""
RETURN = """
command:
description: The CLI command run on the target node(s).
stdout:
description: The set of responses from the vrouter command.
returned: always
type: list
stderr:
description: The set of error responses from the vrouter command.
returned: on error
type: list
changed:
description: Indicates whether the CLI caused changes on the target.
returned: always
type: bool
"""
VROUTER_EXISTS = None
VROUTER_NAME_EXISTS = None
def pn_cli(module):
"""
This method is to generate the cli portion to launch the Netvisor cli.
It parses the username, password, switch parameters from module.
:param module: The Ansible module to fetch username, password and switch
:return: returns the cli string for further processing
"""
username = module.params['pn_cliusername']
password = module.params['pn_clipassword']
cliswitch = module.params['pn_cliswitch']
if username and password:
cli = '/usr/bin/cli --quiet --user %s:%s ' % (username, password)
else:
cli = '/usr/bin/cli --quiet '
if cliswitch == 'local':
cli += ' switch-local '
else:
cli += ' switch ' + cliswitch
return cli
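# Example of the generated prefix (added commentary; credentials are
# hypothetical): with username 'admin', password 'secret' and the default
# 'local' switch, pn_cli() returns
#
#   '/usr/bin/cli --quiet --user admin:secret  switch-local '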
def check_cli(module, cli):
"""
This method checks for idempotency using the vlan-show command.
A switch can have only one vRouter configuration.
If a vRouter already exists on the given switch, return VROUTER_EXISTS as
True else False.
If a vRouter with the given name exists(on a different switch), return
VROUTER_NAME_EXISTS as True else False.
:param module: The Ansible module to fetch input parameters
:param cli: The CLI string
:return Global Booleans: VROUTER_EXISTS, VROUTER_NAME_EXISTS
"""
name = module.params['pn_name']
# Global flags
global VROUTER_EXISTS, VROUTER_NAME_EXISTS
# Get the name of the local switch
location = cli + ' switch-setup-show format switch-name'
location = shlex.split(location)
out = module.run_command(location)[1]
location = out.split()[1]
# Check for any vRouters on the switch
check_vrouter = cli + ' vrouter-show location %s ' % location
check_vrouter += 'format name no-show-headers'
check_vrouter = shlex.split(check_vrouter)
out = module.run_command(check_vrouter)[1]
if out:
VROUTER_EXISTS = True
else:
VROUTER_EXISTS = False
# Check for any vRouters with the given name
show = cli + ' vrouter-show format name no-show-headers '
show = shlex.split(show)
out = module.run_command(show)[1]
out = out.split()
if name in out:
VROUTER_NAME_EXISTS = True
else:
VROUTER_NAME_EXISTS = False
def run_cli(module, cli):
"""
This method executes the cli command on the target node(s) and returns the
output. The module then exits based on the output.
:param cli: the complete cli string to be executed on the target node(s).
:param module: The Ansible module to fetch command
"""
cliswitch = module.params['pn_cliswitch']
state = module.params['state']
command = get_command_from_state(state)
cmd = shlex.split(cli)
# 'out' contains the output
# 'err' contains the error messages
result, out, err = module.run_command(cmd)
print_cli = cli.split(cliswitch)[1]
# Response in JSON format
if result != 0:
module.exit_json(
command=print_cli,
stderr=err.strip(),
msg="%s operation failed" % command,
changed=False
)
if out:
module.exit_json(
command=print_cli,
stdout=out.strip(),
msg="%s operation completed" % command,
changed=True
)
else:
module.exit_json(
command=print_cli,
msg="%s operation completed" % command,
changed=True
)
def get_command_from_state(state):
"""
This method gets appropriate command name for the state specified. It
returns the command name for the specified state.
:param state: The state for which the respective command name is required.
"""
command = None
if state == 'present':
command = 'vrouter-create'
if state == 'absent':
command = 'vrouter-delete'
if state == 'update':
command = 'vrouter-modify'
return command
def main():
""" This section is for arguments parsing """
module = AnsibleModule(
argument_spec=dict(
pn_cliusername=dict(required=False, type='str'),
pn_clipassword=dict(required=False, type='str', no_log=True),
pn_cliswitch=dict(required=False, type='str', default='local'),
            state=dict(required=True, type='str',
choices=['present', 'absent', 'update']),
pn_name=dict(required=True, type='str'),
pn_vnet=dict(type='str'),
pn_service_type=dict(type='str', choices=['dedicated', 'shared']),
pn_service_state=dict(type='str', choices=['enable', 'disable']),
pn_router_type=dict(type='str', choices=['hardware', 'software']),
pn_hw_vrrp_id=dict(type='int'),
pn_router_id=dict(type='str'),
pn_bgp_as=dict(type='int'),
pn_bgp_redistribute=dict(type='str', choices=['static', 'connected',
'rip', 'ospf']),
pn_bgp_max_paths=dict(type='int'),
pn_bgp_options=dict(type='str'),
pn_rip_redistribute=dict(type='str', choices=['static', 'connected',
'bgp', 'ospf']),
pn_ospf_redistribute=dict(type='str', choices=['static', 'connected',
'bgp', 'rip']),
pn_ospf_options=dict(type='str'),
pn_vrrp_track_port=dict(type='str')
),
required_if=(
["state", "present", ["pn_name", "pn_vnet"]],
["state", "absent", ["pn_name"]],
["state", "update", ["pn_name"]]
)
)
# Accessing the arguments
state = module.params['state']
name = module.params['pn_name']
vnet = module.params['pn_vnet']
service_type = module.params['pn_service_type']
service_state = module.params['pn_service_state']
router_type = module.params['pn_router_type']
hw_vrrp_id = module.params['pn_hw_vrrp_id']
router_id = module.params['pn_router_id']
bgp_as = module.params['pn_bgp_as']
bgp_redistribute = module.params['pn_bgp_redistribute']
bgp_max_paths = module.params['pn_bgp_max_paths']
bgp_options = module.params['pn_bgp_options']
rip_redistribute = module.params['pn_rip_redistribute']
ospf_redistribute = module.params['pn_ospf_redistribute']
ospf_options = module.params['pn_ospf_options']
vrrp_track_port = module.params['pn_vrrp_track_port']
command = get_command_from_state(state)
# Building the CLI command string
cli = pn_cli(module)
if command == 'vrouter-delete':
check_cli(module, cli)
if VROUTER_NAME_EXISTS is False:
module.exit_json(
skipped=True,
msg='vRouter with name %s does not exist' % name
)
cli += ' %s name %s ' % (command, name)
else:
if command == 'vrouter-create':
check_cli(module, cli)
if VROUTER_EXISTS is True:
module.exit_json(
skipped=True,
msg='Maximum number of vRouters has been reached on this '
'switch'
)
if VROUTER_NAME_EXISTS is True:
module.exit_json(
skipped=True,
msg='vRouter with name %s already exists' % name
)
cli += ' %s name %s ' % (command, name)
if vnet:
cli += ' vnet ' + vnet
if service_type:
cli += ' %s-vnet-service ' % service_type
if service_state:
cli += ' ' + service_state
if router_type:
cli += ' router-type ' + router_type
if hw_vrrp_id:
cli += ' hw-vrrp-id ' + str(hw_vrrp_id)
if router_id:
cli += ' router-id ' + router_id
if bgp_as:
cli += ' bgp-as ' + str(bgp_as)
if bgp_redistribute:
cli += ' bgp-redistribute ' + bgp_redistribute
if bgp_max_paths:
cli += ' bgp-max-paths ' + str(bgp_max_paths)
if bgp_options:
cli += ' %s ' % bgp_options
if rip_redistribute:
cli += ' rip-redistribute ' + rip_redistribute
if ospf_redistribute:
cli += ' ospf-redistribute ' + ospf_redistribute
if ospf_options:
cli += ' %s ' % ospf_options
if vrrp_track_port:
cli += ' vrrp-track-port ' + vrrp_track_port
run_cli(module, cli)
# AnsibleModule boilerplate
from ansible.module_utils.basic import AnsibleModule
if __name__ == '__main__':
main()
| gpl-3.0 | -5,907,246,054,434,256,000 | 31.06068 | 81 | 0.611856 | false |
Ruide/angr-dev | angr-management/angrmanagement/ui/widgets/qast_viewer.py | 1 | 2770 |
from PySide.QtGui import QFrame, QHBoxLayout, QLabel, QSizePolicy
from PySide.QtCore import QSize
from PySide.QtCore import Qt
import claripy
class QASTViewer(QFrame):
def __init__(self, ast, display_size=True, byte_format=None, parent=None):
super(QASTViewer, self).__init__(parent)
self._ast = ast
self._display_size = display_size
self._byte_format = byte_format
self._size_label = None
self._ast_label = None
self.setFrameShape(QFrame.NoFrame)
self.setLineWidth(0)
self._init_widgets()
#
# Properties
#
@property
def ast(self):
return self._ast
@ast.setter
def ast(self, v):
self._ast = v
self.reload()
#
# Public methods
#
def reload(self):
if self._ast is None:
return
ast = self._ast
# set style
if isinstance(ast, (int, long)) or not ast.symbolic:
self._ast_label.setProperty('class', 'ast_viewer_ast_concrete')
else:
self._ast_label.setProperty('class', 'ast_viewer_ast_symbolic')
# set text
if isinstance(ast, (int, long)):
self._size_label.setText('[Unknown]')
format = "%#x" if self._byte_format is None else self._byte_format
self._ast_label.setText(format % ast)
else:
# claripy.AST
self._size_label.setText("[%d]" % (len(ast) / 8)) # in bytes
if not ast.symbolic:
format = "%#x" if self._byte_format is None else self._byte_format
self._ast_label.setText(format % self._ast._model_concrete.value)
else:
# symbolic
if isinstance(ast, claripy.ast.BV) and ast.op == 'BVS':
var_name = ast.args[0]
self._ast_label.setText(var_name)
else:
self._ast_label.setText(ast.__repr__(max_depth=1))
# reapply the style
self._ast_label.style().unpolish(self._ast_label)
self._ast_label.style().polish(self._ast_label)
#
# Private methods
#
def _init_widgets(self):
layout = QHBoxLayout()
size_label = QLabel()
size_label.setProperty('class', 'ast_viewer_size')
size_label.setAlignment(Qt.AlignRight)
size_label.setMaximumSize(QSize(24, 65536))
self._size_label = size_label
ast_label = QLabel()
self._ast_label = ast_label
if self._ast is not None:
self.reload()
if self._display_size:
layout.addWidget(self._size_label)
layout.addWidget(ast_label)
layout.setContentsMargins(0, 0, 0, 0)
self.setLayout(layout)
| bsd-2-clause | -6,985,445,687,273,895,000 | 26.425743 | 82 | 0.555235 | false |
armersong/letsencrypt | letsencrypt/display/util.py | 22 | 13776 | """Let's Encrypt display."""
import os
import textwrap
import dialog
import zope.interface
from letsencrypt import interfaces
WIDTH = 72
HEIGHT = 20
# Display exit codes
OK = "ok"
"""Display exit code indicating user acceptance."""
CANCEL = "cancel"
"""Display exit code for a user canceling the display."""
HELP = "help"
"""Display exit code when for when the user requests more help."""
class NcursesDisplay(object):
"""Ncurses-based display."""
zope.interface.implements(interfaces.IDisplay)
def __init__(self, width=WIDTH, height=HEIGHT):
super(NcursesDisplay, self).__init__()
self.dialog = dialog.Dialog()
self.width = width
self.height = height
def notification(self, message, height=10, pause=False):
# pylint: disable=unused-argument
"""Display a notification to the user and wait for user acceptance.
.. todo:: It probably makes sense to use one of the transient message
types for pause. It isn't straightforward how best to approach
the matter though given the context of our messages.
http://pythondialog.sourceforge.net/doc/widgets.html#displaying-transient-messages
:param str message: Message to display
:param int height: Height of the dialog box
:param bool pause: Not applicable to NcursesDisplay
"""
self.dialog.msgbox(message, height, width=self.width)
def menu(self, message, choices,
ok_label="OK", cancel_label="Cancel", help_label=""):
"""Display a menu.
:param str message: title of menu
:param choices: menu lines, len must be > 0
:type choices: list of tuples (`tag`, `item`) tags must be unique or
list of items (tags will be enumerated)
:param str ok_label: label of the OK button
:param str help_label: label of the help button
:returns: tuple of the form (`code`, `tag`) where
`code` - `str` display_util exit code
`tag` - `int` index corresponding to the item chosen
:rtype: tuple
"""
menu_options = {
"choices": choices,
"ok_label": ok_label,
"cancel_label": cancel_label,
"help_button": bool(help_label),
"help_label": help_label,
"width": self.width,
"height": self.height,
"menu_height": self.height - 6,
}
# Can accept either tuples or just the actual choices
if choices and isinstance(choices[0], tuple):
# pylint: disable=star-args
code, selection = self.dialog.menu(message, **menu_options)
# Return the selection index
for i, choice in enumerate(choices):
if choice[0] == selection:
return code, i
return code, -1
else:
# "choices" is not formatted the way the dialog.menu expects...
menu_options["choices"] = [
(str(i), choice) for i, choice in enumerate(choices, 1)
]
# pylint: disable=star-args
code, tag = self.dialog.menu(message, **menu_options)
if code == CANCEL:
return code, -1
return code, int(tag) - 1
def input(self, message):
"""Display an input box to the user.
:param str message: Message to display that asks for input.
:returns: tuple of the form (code, string) where
`code` - int display exit code
`string` - input entered by the user
"""
return self.dialog.inputbox(message, width=self.width)
def yesno(self, message, yes_label="Yes", no_label="No"):
"""Display a Yes/No dialog box.
Yes and No label must begin with different letters.
:param str message: message to display to user
:param str yes_label: label on the "yes" button
:param str no_label: label on the "no" button
:returns: if yes_label was selected
:rtype: bool
"""
return self.dialog.DIALOG_OK == self.dialog.yesno(
message, self.height, self.width,
yes_label=yes_label, no_label=no_label)
def checklist(self, message, tags, default_status=True):
"""Displays a checklist.
:param message: Message to display before choices
:param list tags: where each is of type :class:`str` len(tags) > 0
:param bool default_status: If True, items are in a selected state by
default.
:returns: tuple of the form (code, list_tags) where
`code` - int display exit code
`list_tags` - list of str tags selected by the user
"""
choices = [(tag, "", default_status) for tag in tags]
return self.dialog.checklist(
message, width=self.width, height=self.height, choices=choices)
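# Usage sketch (added commentary, not part of the original module): assuming
# the python-dialog backend is available, a caller could do
#
#   display = NcursesDisplay()
#   code, index = display.menu("Pick a domain", ["example.com", "example.org"])
#   if code == OK:
#       chosen = ["example.com", "example.org"][index]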
class FileDisplay(object):
"""File-based display."""
zope.interface.implements(interfaces.IDisplay)
def __init__(self, outfile):
super(FileDisplay, self).__init__()
self.outfile = outfile
def notification(self, message, height=10, pause=True):
# pylint: disable=unused-argument
"""Displays a notification and waits for user acceptance.
:param str message: Message to display
:param int height: No effect for FileDisplay
:param bool pause: Whether or not the program should pause for the
user's confirmation
"""
side_frame = "-" * 79
message = self._wrap_lines(message)
self.outfile.write(
"{line}{frame}{line}{msg}{line}{frame}{line}".format(
line=os.linesep, frame=side_frame, msg=message))
if pause:
raw_input("Press Enter to Continue")
def menu(self, message, choices,
ok_label="", cancel_label="", help_label=""):
# pylint: disable=unused-argument
"""Display a menu.
.. todo:: This doesn't enable the help label/button (I wasn't sold on
any interface I came up with for this). It would be a nice feature
:param str message: title of menu
:param choices: Menu lines, len must be > 0
:type choices: list of tuples (tag, item) or
list of descriptions (tags will be enumerated)
:returns: tuple of the form (code, tag) where
code - int display exit code
tag - str corresponding to the item chosen
:rtype: tuple
"""
self._print_menu(message, choices)
code, selection = self._get_valid_int_ans(len(choices))
return code, selection - 1
def input(self, message):
# pylint: disable=no-self-use
"""Accept input from the user.
:param str message: message to display to the user
:returns: tuple of (`code`, `input`) where
`code` - str display exit code
`input` - str of the user's input
:rtype: tuple
"""
ans = raw_input(
textwrap.fill("%s (Enter 'c' to cancel): " % message, 80))
if ans == "c" or ans == "C":
return CANCEL, "-1"
else:
return OK, ans
def yesno(self, message, yes_label="Yes", no_label="No"):
"""Query the user with a yes/no question.
Yes and No label must begin with different letters, and must contain at
least one letter each.
:param str message: question for the user
:param str yes_label: Label of the "Yes" parameter
:param str no_label: Label of the "No" parameter
:returns: True for "Yes", False for "No"
:rtype: bool
"""
side_frame = ("-" * 79) + os.linesep
message = self._wrap_lines(message)
self.outfile.write("{0}{frame}{msg}{0}{frame}".format(
os.linesep, frame=side_frame, msg=message))
while True:
ans = raw_input("{yes}/{no}: ".format(
yes=_parens_around_char(yes_label),
no=_parens_around_char(no_label)))
# Couldn't get pylint indentation right with elif
# elif doesn't matter in this situation
if (ans.startswith(yes_label[0].lower()) or
ans.startswith(yes_label[0].upper())):
return True
if (ans.startswith(no_label[0].lower()) or
ans.startswith(no_label[0].upper())):
return False
def checklist(self, message, tags, default_status=True):
# pylint: disable=unused-argument
"""Display a checklist.
:param str message: Message to display to user
:param list tags: `str` tags to select, len(tags) > 0
:param bool default_status: Not used for FileDisplay
:returns: tuple of (`code`, `tags`) where
`code` - str display exit code
`tags` - list of selected tags
:rtype: tuple
"""
while True:
self._print_menu(message, tags)
code, ans = self.input("Select the appropriate numbers separated "
"by commas and/or spaces")
if code == OK:
indices = separate_list_input(ans)
selected_tags = self._scrub_checklist_input(indices, tags)
if selected_tags:
return code, selected_tags
else:
self.outfile.write(
"** Error - Invalid selection **%s" % os.linesep)
else:
return code, []
def _scrub_checklist_input(self, indices, tags):
# pylint: disable=no-self-use
"""Validate input and transform indices to appropriate tags.
:param list indices: input
:param list tags: Original tags of the checklist
:returns: valid tags the user selected
:rtype: :class:`list` of :class:`str`
"""
# They should all be of type int
try:
indices = [int(index) for index in indices]
except ValueError:
return []
# Remove duplicates
indices = list(set(indices))
# Check all input is within range
for index in indices:
if index < 1 or index > len(tags):
return []
# Transform indices to appropriate tags
return [tags[index - 1] for index in indices]
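    # Example of the scrub step above (added commentary): with
    # tags == ["a", "b", "c"], indices == ["1", "3"] yields ["a", "c"], while
    # duplicates are collapsed and any non-numeric or out-of-range entry
    # invalidates the whole selection (an empty list is returned).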
def _print_menu(self, message, choices):
"""Print a menu on the screen.
:param str message: title of menu
:param choices: Menu lines
:type choices: list of tuples (tag, item) or
list of descriptions (tags will be enumerated)
"""
# Can take either tuples or single items in choices list
if choices and isinstance(choices[0], tuple):
choices = ["%s - %s" % (c[0], c[1]) for c in choices]
# Write out the message to the user
self.outfile.write(
"{new}{msg}{new}".format(new=os.linesep, msg=message))
side_frame = ("-" * 79) + os.linesep
self.outfile.write(side_frame)
# Write out the menu choices
for i, desc in enumerate(choices, 1):
self.outfile.write(
textwrap.fill("{num}: {desc}".format(num=i, desc=desc), 80))
# Keep this outside of the textwrap
self.outfile.write(os.linesep)
self.outfile.write(side_frame)
def _wrap_lines(self, msg): # pylint: disable=no-self-use
"""Format lines nicely to 80 chars.
:param str msg: Original message
:returns: Formatted message respecting newlines in message
:rtype: str
"""
lines = msg.splitlines()
fixed_l = []
for line in lines:
fixed_l.append(textwrap.fill(line, 80))
return os.linesep.join(fixed_l)
def _get_valid_int_ans(self, max_):
"""Get a numerical selection.
:param int max: The maximum entry (len of choices), must be positive
:returns: tuple of the form (`code`, `selection`) where
`code` - str display exit code ('ok' or cancel')
`selection` - int user's selection
:rtype: tuple
"""
selection = -1
if max_ > 1:
input_msg = ("Select the appropriate number "
"[1-{max_}] then [enter] (press 'c' to "
"cancel): ".format(max_=max_))
else:
input_msg = ("Press 1 [enter] to confirm the selection "
"(press 'c' to cancel): ")
while selection < 1:
ans = raw_input(input_msg)
if ans.startswith("c") or ans.startswith("C"):
return CANCEL, -1
try:
selection = int(ans)
if selection < 1 or selection > max_:
selection = -1
raise ValueError
except ValueError:
self.outfile.write(
"{0}** Invalid input **{0}".format(os.linesep))
return OK, selection
def separate_list_input(input_):
"""Separate a comma or space separated list.
:param str input_: input from the user
:returns: strings
:rtype: list
"""
no_commas = input_.replace(",", " ")
# Each string is naturally unicode, this causes problems with M2Crypto SANs
# TODO: check if above is still true when M2Crypto is gone ^
return [str(string) for string in no_commas.split()]
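# For instance (added commentary):
#
#   separate_list_input("1, 2 3,4")   # -> ["1", "2", "3", "4"]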
def _parens_around_char(label):
"""Place parens around first character of label.
:param str label: Must contain at least one character
"""
return "({first}){rest}".format(first=label[0], rest=label[1:])
| apache-2.0 | 6,059,927,006,826,664,000 | 31.64455 | 94 | 0.569251 | false |
calebfoss/tensorflow | tensorflow/python/ops/sets.py | 16 | 1163 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Python layer for sets.
@@set_size
@@set_intersection
@@set_union
@@set_difference
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# go/tf-wildcard-import
# pylint: disable=wildcard-import
from tensorflow.python.ops.sets_impl import *
# pylint: enable=wildcard-import
from tensorflow.python.util.all_util import remove_undocumented
_allowed_symbols = []
remove_undocumented(__name__, _allowed_symbols)
| apache-2.0 | 2,818,910,167,901,884,400 | 32.228571 | 80 | 0.707653 | false |
stamhe/bitcoin | test/functional/wallet_listreceivedby.py | 1 | 7385 | #!/usr/bin/env python3
# Copyright (c) 2014-2017 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test the listreceivedbyaddress RPC."""
from decimal import Decimal
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import (assert_array_result,
assert_equal,
assert_raises_rpc_error,
)
class ReceivedByTest(BitcoinTestFramework):
def set_test_params(self):
self.num_nodes = 2
def run_test(self):
# Generate block to get out of IBD
self.nodes[0].generate(1)
self.log.info("listreceivedbyaddress Test")
# Send from node 0 to 1
addr = self.nodes[1].getnewaddress()
txid = self.nodes[0].sendtoaddress(addr, 0.1)
self.sync_all()
        # Check not listed in listreceivedbyaddress because it has 0 confirmations
assert_array_result(self.nodes[1].listreceivedbyaddress(),
{"address": addr},
{},
True)
        # Bury Tx under 10 blocks so it will be returned by listreceivedbyaddress
self.nodes[1].generate(10)
self.sync_all()
assert_array_result(self.nodes[1].listreceivedbyaddress(),
{"address": addr},
{"address": addr, "label": "", "amount": Decimal("0.1"), "confirmations": 10, "txids": [txid, ]})
        # With minimum confirmations < 10
assert_array_result(self.nodes[1].listreceivedbyaddress(5),
{"address": addr},
{"address": addr, "label": "", "amount": Decimal("0.1"), "confirmations": 10, "txids": [txid, ]})
        # With minimum confirmations > 10, should not find Tx
assert_array_result(self.nodes[1].listreceivedbyaddress(11), {"address": addr}, {}, True)
# Empty Tx
empty_addr = self.nodes[1].getnewaddress()
assert_array_result(self.nodes[1].listreceivedbyaddress(0, True),
{"address": empty_addr},
{"address": empty_addr, "label": "", "amount": 0, "confirmations": 0, "txids": []})
#Test Address filtering
#Only on addr
expected = {"address":addr, "label":"", "amount":Decimal("0.1"), "confirmations":10, "txids":[txid,]}
res = self.nodes[1].listreceivedbyaddress(minconf=0, include_empty=True, include_watchonly=True, address_filter=addr)
assert_array_result(res, {"address":addr}, expected)
assert_equal(len(res), 1)
#Error on invalid address
assert_raises_rpc_error(-4, "address_filter parameter was invalid", self.nodes[1].listreceivedbyaddress, minconf=0, include_empty=True, include_watchonly=True, address_filter="bamboozling")
#Another address receive money
res = self.nodes[1].listreceivedbyaddress(0, True, True)
assert_equal(len(res), 2) #Right now 2 entries
other_addr = self.nodes[1].getnewaddress()
txid2 = self.nodes[0].sendtoaddress(other_addr, 0.1)
self.nodes[0].generate(1)
self.sync_all()
#Same test as above should still pass
expected = {"address":addr, "label":"", "amount":Decimal("0.1"), "confirmations":11, "txids":[txid,]}
res = self.nodes[1].listreceivedbyaddress(0, True, True, addr)
assert_array_result(res, {"address":addr}, expected)
assert_equal(len(res), 1)
#Same test as above but with other_addr should still pass
expected = {"address":other_addr, "label":"", "amount":Decimal("0.1"), "confirmations":1, "txids":[txid2,]}
res = self.nodes[1].listreceivedbyaddress(0, True, True, other_addr)
assert_array_result(res, {"address":other_addr}, expected)
assert_equal(len(res), 1)
#Should be two entries though without filter
res = self.nodes[1].listreceivedbyaddress(0, True, True)
assert_equal(len(res), 3) #Became 3 entries
#Not on random addr
other_addr = self.nodes[0].getnewaddress() # note on node[0]! just a random addr
res = self.nodes[1].listreceivedbyaddress(0, True, True, other_addr)
assert_equal(len(res), 0)
self.log.info("getreceivedbyaddress Test")
# Send from node 0 to 1
addr = self.nodes[1].getnewaddress()
txid = self.nodes[0].sendtoaddress(addr, 0.1)
self.sync_all()
# Check balance is 0 because of 0 confirmations
balance = self.nodes[1].getreceivedbyaddress(addr)
assert_equal(balance, Decimal("0.0"))
# Check balance is 0.1
balance = self.nodes[1].getreceivedbyaddress(addr, 0)
assert_equal(balance, Decimal("0.1"))
        # Bury Tx under 10 blocks so it will be returned by the default getreceivedbyaddress
self.nodes[1].generate(10)
self.sync_all()
balance = self.nodes[1].getreceivedbyaddress(addr)
assert_equal(balance, Decimal("0.1"))
# Trying to getreceivedby for an address the wallet doesn't own should return an error
assert_raises_rpc_error(-4, "Address not found in wallet", self.nodes[0].getreceivedbyaddress, addr)
self.log.info("listreceivedbylabel + getreceivedbylabel Test")
# set pre-state
addrArr = self.nodes[1].getnewaddress()
label = self.nodes[1].getaccount(addrArr)
received_by_label_json = [r for r in self.nodes[1].listreceivedbylabel() if r["label"] == label][0]
balance_by_label = self.nodes[1].getreceivedbylabel(label)
txid = self.nodes[0].sendtoaddress(addr, 0.1)
self.sync_all()
# listreceivedbylabel should return received_by_label_json because of 0 confirmations
assert_array_result(self.nodes[1].listreceivedbylabel(),
{"label": label},
received_by_label_json)
        # getreceivedbylabel should return the same balance because of 0 confirmations
balance = self.nodes[1].getreceivedbylabel(label)
assert_equal(balance, balance_by_label)
self.nodes[1].generate(10)
self.sync_all()
# listreceivedbylabel should return updated received list
assert_array_result(self.nodes[1].listreceivedbylabel(),
{"label": label},
{"label": received_by_label_json["label"], "amount": (received_by_label_json["amount"] + Decimal("0.1"))})
# getreceivedbylabel should return updated receive total
balance = self.nodes[1].getreceivedbylabel(label)
assert_equal(balance, balance_by_label + Decimal("0.1"))
# Create a new label named "mynewlabel" that has a 0 balance
self.nodes[1].getlabeladdress("mynewlabel")
received_by_label_json = [r for r in self.nodes[1].listreceivedbylabel(0, True) if r["label"] == "mynewlabel"][0]
# Test includeempty of listreceivedbylabel
assert_equal(received_by_label_json["amount"], Decimal("0.0"))
# Test getreceivedbylabel for 0 amount labels
balance = self.nodes[1].getreceivedbylabel("mynewlabel")
assert_equal(balance, Decimal("0.0"))
if __name__ == '__main__':
ReceivedByTest().main()
| mit | -3,615,646,710,451,650,600 | 46.954545 | 197 | 0.61476 | false |
Ernti/GG-Server | ggs/net/client.py | 1 | 1345 | import json
import socket
import re
from threading import Thread
from ggs.event import Observable
class ReceiveThread(Thread):
def __init__(self, client):
Thread.__init__(self)
self.client = client
def run(self):
while True:
try:
                data = self.client.conn.recv(1024)
                # an empty read means the peer closed the connection
                if not data:
                    self.client.fire(type='disconnected')
                    break
                # messages arrive as parenthesised JSON frames, e.g. ({"foo": 1})
                for match_group in re.finditer("\(([^()]+)\)", data.decode()):
                    data_json = json.loads(match_group.group(1))
                    self.client.handle(data_json)
except socket.error:
self.client.fire(type='disconnected')
break
class Client(Observable):
def __init__(self, server, conn, addr):
self.server = server
self.conn = conn
self.addr = addr
self.receive_thread = ReceiveThread(self)
self.receive_thread.start()
Observable.__init__(self)
def send(self, message):
try:
self.conn.send(('(' + json.dumps(message) + ')').encode())
except socket.error:
self.fire(type='disconnected')
def handle(self, message):
self.server.handle(message, self) | gpl-2.0 | 9,134,087,358,805,982,000 | 25.92 | 78 | 0.522677 | false |
jkonecny12/pykickstart | pykickstart/commands/network.py | 7 | 24769 | #
# Chris Lumens <[email protected]>
#
# Copyright 2005, 2006, 2007, 2008 Red Hat, Inc.
#
# This copyrighted material is made available to anyone wishing to use, modify,
# copy, or redistribute it subject to the terms and conditions of the GNU
# General Public License v.2. This program is distributed in the hope that it
# will be useful, but WITHOUT ANY WARRANTY expressed or implied, including the
# implied warranties of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51
# Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. Any Red Hat
# trademarks that are incorporated in the source code or documentation are not
# subject to the GNU General Public License and may only be used or replicated
# with the express permission of Red Hat, Inc.
#
from pykickstart.base import BaseData, KickstartCommand
from pykickstart.constants import BOOTPROTO_BOOTP, BOOTPROTO_DHCP, BOOTPROTO_IBFT, BOOTPROTO_QUERY, BOOTPROTO_STATIC
from pykickstart.options import KSOptionParser
from pykickstart.errors import KickstartValueError, formatErrorMsg
import warnings
from pykickstart.i18n import _
MIN_VLAN_ID = 0
MAX_VLAN_ID = 4095
class FC3_NetworkData(BaseData):
removedKeywords = BaseData.removedKeywords
removedAttrs = BaseData.removedAttrs
def __init__(self, *args, **kwargs):
BaseData.__init__(self, *args, **kwargs)
self.bootProto = kwargs.get("bootProto", BOOTPROTO_DHCP)
self.dhcpclass = kwargs.get("dhcpclass", "")
self.device = kwargs.get("device", "")
self.essid = kwargs.get("essid", "")
self.ethtool = kwargs.get("ethtool", "")
self.gateway = kwargs.get("gateway", "")
self.hostname = kwargs.get("hostname", "")
self.ip = kwargs.get("ip", "")
self.mtu = kwargs.get("mtu", "")
self.nameserver = kwargs.get("nameserver", "")
self.netmask = kwargs.get("netmask", "")
self.nodns = kwargs.get("nodns", False)
self.onboot = kwargs.get("onboot", True)
self.wepkey = kwargs.get("wepkey", "")
def __eq__(self, y):
if not y:
return False
return self.device and self.device == y.device
def __ne__(self, y):
return not self == y
def _getArgsAsStr(self):
retval = ""
if self.bootProto != "":
retval += " --bootproto=%s" % self.bootProto
if self.dhcpclass != "":
retval += " --dhcpclass=%s" % self.dhcpclass
if self.device != "":
retval += " --device=%s" % self.device
if self.essid != "":
retval += " --essid=\"%s\"" % self.essid
if self.ethtool != "":
retval += " --ethtool=\"%s\"" % self.ethtool
if self.gateway != "":
retval += " --gateway=%s" % self.gateway
if self.hostname != "":
retval += " --hostname=%s" % self.hostname
if self.ip != "":
retval += " --ip=%s" % self.ip
if self.mtu != "":
retval += " --mtu=%s" % self.mtu
if self.nameserver != "":
retval += " --nameserver=%s" % self.nameserver
if self.netmask != "":
retval += " --netmask=%s" % self.netmask
if self.nodns:
retval += " --nodns"
if not self.onboot:
retval += " --onboot=off"
if self.wepkey != "":
retval += " --wepkey=%s" % self.wepkey
return retval
def __str__(self):
retval = BaseData.__str__(self)
retval += "network %s\n" % self._getArgsAsStr()
return retval
class FC4_NetworkData(FC3_NetworkData):
removedKeywords = FC3_NetworkData.removedKeywords
removedAttrs = FC3_NetworkData.removedAttrs
def __init__(self, *args, **kwargs):
FC3_NetworkData.__init__(self, *args, **kwargs)
self.notksdevice = kwargs.get("notksdevice", False)
def _getArgsAsStr(self):
retval = FC3_NetworkData._getArgsAsStr(self)
if self.notksdevice:
retval += " --notksdevice"
return retval
class FC6_NetworkData(FC4_NetworkData):
removedKeywords = FC4_NetworkData.removedKeywords
removedAttrs = FC4_NetworkData.removedAttrs
def __init__(self, *args, **kwargs):
FC4_NetworkData.__init__(self, *args, **kwargs)
self.noipv4 = kwargs.get("noipv4", False)
self.noipv6 = kwargs.get("noipv6", False)
def _getArgsAsStr(self):
retval = FC4_NetworkData._getArgsAsStr(self)
if self.noipv4:
retval += " --noipv4"
if self.noipv6:
retval += " --noipv6"
return retval
class F8_NetworkData(FC6_NetworkData):
removedKeywords = FC6_NetworkData.removedKeywords
removedAttrs = FC6_NetworkData.removedAttrs
def __init__(self, *args, **kwargs):
FC6_NetworkData.__init__(self, *args, **kwargs)
self.ipv6 = kwargs.get("ipv6", "")
def _getArgsAsStr(self):
retval = FC6_NetworkData._getArgsAsStr(self)
if self.ipv6 != "":
retval += " --ipv6=%s" % self.ipv6
return retval
class F16_NetworkData(F8_NetworkData):
removedKeywords = F8_NetworkData.removedKeywords
removedAttrs = F8_NetworkData.removedAttrs
def __init__(self, *args, **kwargs):
F8_NetworkData.__init__(self, *args, **kwargs)
self.activate = kwargs.get("activate", False)
self.nodefroute = kwargs.get("nodefroute", False)
self.wpakey = kwargs.get("wpakey", "")
def _getArgsAsStr(self):
retval = F8_NetworkData._getArgsAsStr(self)
if self.activate:
retval += " --activate"
if self.nodefroute:
retval += " --nodefroute"
if self.wpakey != "":
retval += " --wpakey=%s" % self.wpakey
return retval
class F19_NetworkData(F16_NetworkData):
removedKeywords = F16_NetworkData.removedKeywords
removedAttrs = F16_NetworkData.removedAttrs
def __init__(self, *args, **kwargs):
F16_NetworkData.__init__(self, *args, **kwargs)
self.bondslaves = kwargs.get("bondslaves", "")
self.bondopts = kwargs.get("bondopts", "")
self.vlanid = kwargs.get("vlanid", "")
self.ipv6gateway = kwargs.get("ipv6gateway", "")
def _getArgsAsStr(self):
retval = F16_NetworkData._getArgsAsStr(self)
if self.bondslaves != "":
retval += " --bondslaves=%s" % self.bondslaves
if self.bondopts != "":
retval += " --bondopts=%s" % self.bondopts
if self.vlanid:
retval += " --vlanid=%s" % self.vlanid
if self.ipv6gateway:
retval += " --ipv6gateway=%s" % self.ipv6gateway
return retval
class F20_NetworkData(F19_NetworkData):
removedKeywords = F19_NetworkData.removedKeywords
removedAttrs = F19_NetworkData.removedAttrs
def __init__(self, *args, **kwargs):
F19_NetworkData.__init__(self, *args, **kwargs)
self.teamslaves = kwargs.get("teamslaves", [])
self.teamconfig = kwargs.get("teamconfig", "")
def _getArgsAsStr(self):
retval = F19_NetworkData._getArgsAsStr(self)
# see the tests for format description
if self.teamslaves:
slavecfgs = []
for slave, config in self.teamslaves:
if config:
config = "'" + config + "'"
slavecfgs.append(slave+config)
slavecfgs = ",".join(slavecfgs).replace('"', r'\"')
retval += ' --teamslaves="%s"' % slavecfgs
if self.teamconfig:
retval += ' --teamconfig="%s"' % self.teamconfig.replace('"', r'\"')
return retval
class F21_NetworkData(F20_NetworkData):
removedKeywords = F20_NetworkData.removedKeywords
removedAttrs = F20_NetworkData.removedAttrs
def __init__(self, *args, **kwargs):
F20_NetworkData.__init__(self, *args, **kwargs)
self.interfacename = kwargs.get("interfacename", "")
def _getArgsAsStr(self):
retval = F20_NetworkData._getArgsAsStr(self)
if self.interfacename:
retval += " --interfacename=%s" % self.interfacename
return retval
class F22_NetworkData(F21_NetworkData):
removedKeywords = F21_NetworkData.removedKeywords
removedAttrs = F21_NetworkData.removedAttrs
def __init__(self, *args, **kwargs):
F21_NetworkData.__init__(self, *args, **kwargs)
self.bridgeslaves = kwargs.get("bridgeslaves", "")
self.bridgeopts = kwargs.get("bridgeopts", "")
def _getArgsAsStr(self):
retval = F21_NetworkData._getArgsAsStr(self)
if self.bridgeslaves != "":
retval += " --bridgeslaves=%s" % self.bridgeslaves
if self.bridgeopts != "":
retval += " --bridgeopts=%s" % self.bridgeopts
return retval
class RHEL4_NetworkData(FC3_NetworkData):
removedKeywords = FC3_NetworkData.removedKeywords
removedAttrs = FC3_NetworkData.removedAttrs
def __init__(self, *args, **kwargs):
FC3_NetworkData.__init__(self, *args, **kwargs)
self.notksdevice = kwargs.get("notksdevice", False)
def _getArgsAsStr(self):
retval = FC3_NetworkData._getArgsAsStr(self)
if self.notksdevice:
retval += " --notksdevice"
return retval
class RHEL6_NetworkData(F8_NetworkData):
removedKeywords = F8_NetworkData.removedKeywords
removedAttrs = F8_NetworkData.removedAttrs
def __init__(self, *args, **kwargs):
F8_NetworkData.__init__(self, *args, **kwargs)
self.activate = kwargs.get("activate", False)
self.nodefroute = kwargs.get("nodefroute", False)
self.vlanid = kwargs.get("vlanid", "")
self.bondslaves = kwargs.get("bondslaves", "")
self.bondopts = kwargs.get("bondopts", "")
def _getArgsAsStr(self):
retval = F8_NetworkData._getArgsAsStr(self)
if self.activate:
retval += " --activate"
if self.nodefroute:
retval += " --nodefroute"
if self.vlanid:
retval += " --vlanid=%s" % self.vlanid
if self.bondslaves:
retval += " --bondslaves=%s" % self.bondslaves
if self.bondopts:
retval += " --bondopts=%s" % self.bondopts
return retval
class RHEL7_NetworkData(F21_NetworkData):
removedKeywords = F21_NetworkData.removedKeywords
removedAttrs = F21_NetworkData.removedAttrs
def __init__(self, *args, **kwargs):
F21_NetworkData.__init__(self, *args, **kwargs)
self.bridgeslaves = kwargs.get("bridgeslaves", "")
self.bridgeopts = kwargs.get("bridgeopts", "")
def _getArgsAsStr(self):
retval = F21_NetworkData._getArgsAsStr(self)
if self.bridgeslaves != "":
retval += " --bridgeslaves=%s" % self.bridgeslaves
if self.bridgeopts != "":
retval += " --bridgeopts=%s" % self.bridgeopts
return retval
class FC3_Network(KickstartCommand):
removedKeywords = KickstartCommand.removedKeywords
removedAttrs = KickstartCommand.removedAttrs
def __init__(self, writePriority=0, *args, **kwargs):
KickstartCommand.__init__(self, writePriority, *args, **kwargs)
self.bootprotoList = [BOOTPROTO_DHCP, BOOTPROTO_BOOTP,
BOOTPROTO_STATIC]
self.op = self._getParser()
self.network = kwargs.get("network", [])
def __str__(self):
retval = ""
for nic in self.network:
retval += nic.__str__()
if retval != "":
return "# Network information\n" + retval
else:
return ""
def _getParser(self):
op = KSOptionParser()
op.add_option("--bootproto", dest="bootProto",
default=BOOTPROTO_DHCP,
choices=self.bootprotoList)
op.add_option("--dhcpclass", dest="dhcpclass")
op.add_option("--device", dest="device")
op.add_option("--essid", dest="essid")
op.add_option("--ethtool", dest="ethtool")
op.add_option("--gateway", dest="gateway")
op.add_option("--hostname", dest="hostname")
op.add_option("--ip", dest="ip")
op.add_option("--mtu", dest="mtu")
op.add_option("--nameserver", dest="nameserver")
op.add_option("--netmask", dest="netmask")
op.add_option("--nodns", dest="nodns", action="store_true",
default=False)
op.add_option("--onboot", dest="onboot", action="store",
type="ksboolean")
op.add_option("--wepkey", dest="wepkey")
return op
def parse(self, args):
(opts, _extra) = self.op.parse_args(args=args, lineno=self.lineno)
nd = self.handler.NetworkData()
self._setToObj(self.op, opts, nd)
nd.lineno = self.lineno
# Check for duplicates in the data list.
if nd in self.dataList():
warnings.warn(_("A network device with the name %s has already been defined.") % nd.device)
return nd
def dataList(self):
return self.network
class FC4_Network(FC3_Network):
removedKeywords = FC3_Network.removedKeywords
removedAttrs = FC3_Network.removedAttrs
def _getParser(self):
op = FC3_Network._getParser(self)
op.add_option("--notksdevice", dest="notksdevice", action="store_true",
default=False)
return op
class FC6_Network(FC4_Network):
removedKeywords = FC4_Network.removedKeywords
removedAttrs = FC4_Network.removedAttrs
def _getParser(self):
op = FC4_Network._getParser(self)
op.add_option("--noipv4", dest="noipv4", action="store_true",
default=False)
op.add_option("--noipv6", dest="noipv6", action="store_true",
default=False)
return op
class F8_Network(FC6_Network):
removedKeywords = FC6_Network.removedKeywords
removedAttrs = FC6_Network.removedAttrs
def _getParser(self):
op = FC6_Network._getParser(self)
op.add_option("--ipv6", dest="ipv6")
return op
class F9_Network(F8_Network):
removedKeywords = F8_Network.removedKeywords
removedAttrs = F8_Network.removedAttrs
def __init__(self, writePriority=0, *args, **kwargs):
F8_Network.__init__(self, writePriority, *args, **kwargs)
self.bootprotoList.append(BOOTPROTO_QUERY)
def _getParser(self):
op = F8_Network._getParser(self)
op.add_option("--bootproto", dest="bootProto",
default=BOOTPROTO_DHCP,
choices=self.bootprotoList)
return op
class F16_Network(F9_Network):
removedKeywords = F9_Network.removedKeywords
removedAttrs = F9_Network.removedAttrs
def __init__(self, writePriority=0, *args, **kwargs):
F9_Network.__init__(self, writePriority, *args, **kwargs)
self.bootprotoList.append(BOOTPROTO_IBFT)
def _getParser(self):
op = F9_Network._getParser(self)
op.add_option("--activate", dest="activate", action="store_true",
default=False)
op.add_option("--nodefroute", dest="nodefroute", action="store_true",
default=False)
op.add_option("--wpakey", dest="wpakey", action="store", default="")
return op
class F18_Network(F16_Network):
@property
def hostname(self):
for nd in self.dataList():
if nd.hostname:
return nd.hostname
return None
class F19_Network(F18_Network):
def _getParser(self):
op = F18_Network._getParser(self)
op.add_option("--bondslaves", dest="bondslaves", action="store",
default="")
op.add_option("--bondopts", dest="bondopts", action="store",
default="")
op.add_option("--vlanid", dest="vlanid")
op.add_option("--ipv6gateway", dest="ipv6gateway", action="store",
default="")
return op
class F20_Network(F19_Network):
def _getParser(self):
# see the tests for teamslaves option
def teamslaves_cb(option, opt_str, value, parser):
# value is of: "<DEV1>['<JSON_CONFIG1>'],<DEV2>['<JSON_CONFIG2>'],..."
# for example: "eth1,eth2'{"prio": 100}',eth3"
teamslaves = []
if value:
# Although slaves, having optional config, are separated by ","
# first extract json configs because they can contain the ","
parts = value.split("'")
# parts == ['eth1,eth2', '{"prio": 100}', ',eth3']
# ensure the list has even number of items for further zipping,
# for odd number of items
if len(parts) % 2 == 1:
# if the list ends with an empty string which must be a leftover
# from splitting string not ending with device eg
# "eth1,eth2'{"prio":100"}'"
if not parts[-1]:
# just remove it
parts = parts[:-1]
# if not (our example), add empty config for the last device
else:
parts.append('')
# parts == ['eth1,eth2', '{"prio": 100}', ',eth3', '']
# zip devices with their configs
it = iter(parts)
for devs, cfg in zip(it,it):
# first loop:
# devs == "eth1,eth2", cfg == '{"prio": 100}'
devs = devs.strip(',').split(',')
# devs == ["eth1", "eth2"]
# initialize config of all devs but the last one to empty
for d in devs[:-1]:
teamslaves.append((d, ''))
# teamslaves == [("eth1", '')]
# and set config of the last device
teamslaves.append((devs[-1], cfg))
# teamslaves == [('eth1', ''), ('eth2', '{"prio": 100}']
parser.values.teamslaves = teamslaves
op = F19_Network._getParser(self)
op.add_option("--teamslaves", dest="teamslaves", action="callback",
callback=teamslaves_cb, nargs=1, type="string")
op.add_option("--teamconfig", dest="teamconfig", action="store",
default="")
return op
class F21_Network(F20_Network):
def _getParser(self):
op = F20_Network._getParser(self)
op.add_option("--interfacename", dest="interfacename", action="store",
default="")
return op
class F22_Network(F21_Network):
def _getParser(self):
op = F21_Network._getParser(self)
op.add_option("--bridgeslaves", dest="bridgeslaves", action="store",
default="")
op.add_option("--bridgeopts", dest="bridgeopts", action="store",
default="")
return op
def parse(self, args):
        # call the overridden command to do its job first
retval = F21_Network.parse(self, args)
if retval.bridgeopts:
if not retval.bridgeslaves:
msg = formatErrorMsg(self.lineno, msg=_("Option --bridgeopts requires "\
"--bridgeslaves to be specified"))
raise KickstartValueError(msg)
opts = retval.bridgeopts.split(",")
for opt in opts:
_key, _sep, value = opt.partition("=")
if not value or "=" in value:
msg = formatErrorMsg(self.lineno, msg=_("Bad format of --bridgeopts, expecting key=value options separated by ','"))
raise KickstartValueError(msg)
return retval
class RHEL4_Network(FC3_Network):
removedKeywords = FC3_Network.removedKeywords
removedAttrs = FC3_Network.removedAttrs
def _getParser(self):
op = FC3_Network._getParser(self)
op.add_option("--notksdevice", dest="notksdevice", action="store_true",
default=False)
return op
class RHEL5_Network(FC6_Network):
removedKeywords = FC6_Network.removedKeywords
removedAttrs = FC6_Network.removedAttrs
def __init__(self, writePriority=0, *args, **kwargs):
FC6_Network.__init__(self, writePriority, *args, **kwargs)
self.bootprotoList.append(BOOTPROTO_QUERY)
def _getParser(self):
op = FC6_Network._getParser(self)
op.add_option("--bootproto", dest="bootProto",
default=BOOTPROTO_DHCP,
choices=self.bootprotoList)
return op
class RHEL6_Network(F9_Network):
removedKeywords = F9_Network.removedKeywords
removedAttrs = F9_Network.removedAttrs
def __init__(self, writePriority=0, *args, **kwargs):
F9_Network.__init__(self, writePriority, *args, **kwargs)
self.bootprotoList.append(BOOTPROTO_IBFT)
def _getParser(self):
op = F9_Network._getParser(self)
op.add_option("--activate", dest="activate", action="store_true",
default=False)
op.add_option("--nodefroute", dest="nodefroute", action="store_true",
default=False)
op.add_option("--vlanid", dest="vlanid")
op.add_option("--bondslaves", dest="bondslaves")
op.add_option("--bondopts", dest="bondopts")
return op
def validate_network_interface_name(name):
"""Check if the given network interface name is valid, return an error message
if an error is found or None if no errors are found
:param str name: name to validate
:returns: error message or None if no error is found
:rtype: str or NoneType
"""
# (for reference see the NetworkManager source code:
# NetworkManager/src/settings/plugins/ifcfg-rh/reader.c
# and the make_vlan_setting function)
vlan_id = None
# if it contains '.', vlan id should follow (eg 'ens7.171', 'mydev.171')
(vlan, dot, id_candidate) = name.partition(".")
if dot:
# 'vlan' can't be followed by a '.'
if vlan == "vlan":
return _("When using the <prefix>.<vlan id> interface name notation, <prefix> can't be equal to 'vlan'.")
try:
vlan_id = int(id_candidate)
except ValueError:
return _("If network --interfacename contains a '.', valid vlan id should follow.")
# if it starts with 'vlan', vlan id should follow ('vlan171')
(empty, sep, id_candidate) = name.partition("vlan")
if sep and empty == "":
# if we checked only for empty == "", we would evaluate missing interface name as an error
try:
vlan_id = int(id_candidate)
except ValueError:
return _("If network --interfacename starts with 'vlan', valid vlan id should follow.")
# check if the vlan id is in range
if vlan_id is not None:
if not(MIN_VLAN_ID <= vlan_id <= MAX_VLAN_ID):
return _("The vlan id out of the %d-%d vlan id range.") % (MIN_VLAN_ID, MAX_VLAN_ID)
# network interface name seems to be valid (no error found)
return None
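# Illustrative checks (hypothetical interface names, not part of the original
# module):
#
#     validate_network_interface_name("ens7.171")   # None  (valid <prefix>.<vlan id>)
#     validate_network_interface_name("vlan171")    # None  (valid vlan<id>)
#     validate_network_interface_name("vlan.171")   # error (prefix must not be 'vlan')
#     validate_network_interface_name("ens7.bad")   # error (vlan id must follow the '.')
#     validate_network_interface_name("vlan5000")   # error (id outside 0-4095)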
class RHEL7_Network(F21_Network):
def _getParser(self):
op = F21_Network._getParser(self)
op.add_option("--bridgeslaves", dest="bridgeslaves", action="store",
default="")
op.add_option("--bridgeopts", dest="bridgeopts", action="store",
default="")
return op
def parse(self, args):
        # call the overridden command to do its job first
retval = F21_Network.parse(self, args)
# validate the network interface name
error_message = validate_network_interface_name(retval.interfacename)
# something is wrong with the interface name
if error_message is not None:
raise KickstartValueError(formatErrorMsg(self.lineno,msg=error_message))
if retval.bridgeopts:
if not retval.bridgeslaves:
msg = formatErrorMsg(self.lineno, msg=_("Option --bridgeopts requires "\
"--bridgeslaves to be specified"))
raise KickstartValueError(msg)
opts = retval.bridgeopts.split(",")
for opt in opts:
_key, _sep, value = opt.partition("=")
if not value or "=" in value:
msg = formatErrorMsg(self.lineno, msg=_("Bad format of --bridgeopts, expecting key=value options separated by ','"))
raise KickstartValueError(msg)
return retval
| gpl-2.0 | 4,870,074,090,182,911,000 | 36.246617 | 136 | 0.585571 | false |
damdam-s/bank-payment | account_banking_payment_transfer/model/account_move_reconcile.py | 11 | 1700 | # -*- coding: utf-8 -*-
##############################################################################
#
# Copyright (C) 2014 ACSONE SA (<http://acsone.eu>).
# Copyright (C) 2014 Akretion (www.akretion.com)
#
# All other contributions are (C) by their respective contributors
#
# All Rights Reserved
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp import models, workflow, api
class AccountMoveReconcile(models.Model):
_inherit = 'account.move.reconcile'
@api.multi
def unlink(self):
"""
Workflow triggers upon unreconcile. This should go into the core.
"""
line_ids = []
for reconcile in self:
for move_line in reconcile.line_id:
line_ids.append(move_line.id)
res = super(AccountMoveReconcile, self).unlink()
for line_id in line_ids:
workflow.trg_trigger(
self._uid, 'account.move.line', line_id, self._cr)
return res
| agpl-3.0 | -7,925,547,129,787,654,000 | 36.777778 | 78 | 0.6 | false |
joequant/zipline | zipline/assets/assets.py | 8 | 34670 | # Copyright 2015 Quantopian, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from abc import ABCMeta
from numbers import Integral
import numpy as np
import sqlite3
from sqlite3 import Row
import warnings
from logbook import Logger
import pandas as pd
from pandas.tseries.tools import normalize_date
from six import with_metaclass, string_types
from zipline.errors import (
ConsumeAssetMetaDataError,
InvalidAssetType,
MultipleSymbolsFound,
RootSymbolNotFound,
SidAssignmentError,
SidNotFound,
SymbolNotFound,
MapAssetIdentifierIndexError,
)
from zipline.assets._assets import (
Asset, Equity, Future
)
log = Logger('assets.py')
# Expected fields for an Asset's metadata
ASSET_FIELDS = [
'sid',
'asset_type',
'symbol',
'root_symbol',
'asset_name',
'start_date',
'end_date',
'first_traded',
'exchange',
'notice_date',
'expiration_date',
'contract_multiplier',
# The following fields are for compatibility with other systems
'file_name', # Used as symbol
'company_name', # Used as asset_name
'start_date_nano', # Used as start_date
'end_date_nano', # Used as end_date
]
# Expected fields for an Asset's metadata
ASSET_TABLE_FIELDS = [
'sid',
'symbol',
'asset_name',
'start_date',
'end_date',
'first_traded',
'exchange',
]
# Expected fields for an Asset's metadata
FUTURE_TABLE_FIELDS = ASSET_TABLE_FIELDS + [
'root_symbol',
'notice_date',
'expiration_date',
'contract_multiplier',
]
EQUITY_TABLE_FIELDS = ASSET_TABLE_FIELDS
# Create the query once from the fields, so that the join is not done
# repeatedly.
FUTURE_BY_SID_QUERY = 'select {0} from futures where sid=?'.format(
", ".join(FUTURE_TABLE_FIELDS))
EQUITY_BY_SID_QUERY = 'select {0} from equities where sid=?'.format(
", ".join(EQUITY_TABLE_FIELDS))
class AssetFinder(object):
def __init__(self,
metadata=None,
allow_sid_assignment=True,
fuzzy_char=None,
db_path=':memory:',
create_table=True):
self.fuzzy_char = fuzzy_char
# This flag controls if the AssetFinder is allowed to generate its own
# sids. If False, metadata that does not contain a sid will raise an
# exception when building assets.
self.allow_sid_assignment = allow_sid_assignment
if allow_sid_assignment:
self.end_date_to_assign = normalize_date(
pd.Timestamp('now', tz='UTC'))
self.conn = sqlite3.connect(db_path)
self.conn.text_factory = str
self.cursor = self.conn.cursor()
# The AssetFinder also holds a nested-dict of all metadata for
# reference when building Assets
self.metadata_cache = {}
# Create table and read in metadata.
# Should we use flags like 'r', 'w', instead?
# What we need to support is:
# - A 'throwaway' mode where the metadata is read each run.
# - A 'write' mode where the data is written to the provided db_path
        # - A 'read' mode where the asset finder uses a preexisting db.
if create_table:
self.create_db_tables()
if metadata is not None:
self.consume_metadata(metadata)
        # Cache for lookup of assets by sid, the objects in the asset lookup may
# be shared with the results from equity and future lookup caches.
#
# The top level cache exists to minimize lookups on the asset type
# routing.
#
# The caches are read through, i.e. accessing an asset through
# retrieve_asset, _retrieve_equity etc. will populate the cache on
# first retrieval.
self._asset_cache = {}
self._equity_cache = {}
self._future_cache = {}
self._asset_type_cache = {}
# Populated on first call to `lifetimes`.
self._asset_lifetimes = None
def create_db_tables(self):
c = self.conn.cursor()
c.execute("""
CREATE TABLE equities(
sid integer,
symbol text,
asset_name text,
start_date integer,
end_date integer,
first_traded integer,
exchange text,
fuzzy text
)""")
c.execute('CREATE INDEX equities_sid on equities(sid)')
c.execute('CREATE INDEX equities_symbol on equities(symbol)')
c.execute('CREATE INDEX equities_fuzzy on equities(fuzzy)')
c.execute("""
CREATE TABLE futures(
sid integer,
symbol text,
asset_name text,
start_date integer,
end_date integer,
first_traded integer,
exchange text,
root_symbol text,
notice_date integer,
expiration_date integer,
contract_multiplier real
)""")
c.execute('CREATE INDEX futures_sid on futures(sid)')
        c.execute('CREATE INDEX futures_root_symbol on futures(root_symbol)')
c.execute("""
CREATE TABLE asset_router
(sid integer,
asset_type text)
""")
c.execute('CREATE INDEX asset_router_sid on asset_router(sid)')
self.conn.commit()
def asset_type_by_sid(self, sid):
try:
return self._asset_type_cache[sid]
except KeyError:
pass
c = self.conn.cursor()
# Python 3 compatibility required forcing to int for sid = 0.
t = (int(sid),)
query = 'select asset_type from asset_router where sid=:sid'
c.execute(query, t)
data = c.fetchone()
if data is None:
return
asset_type = data[0]
self._asset_type_cache[sid] = asset_type
return asset_type
def retrieve_asset(self, sid, default_none=False):
if isinstance(sid, Asset):
return sid
try:
asset = self._asset_cache[sid]
except KeyError:
asset_type = self.asset_type_by_sid(sid)
if asset_type == 'equity':
asset = self._retrieve_equity(sid)
elif asset_type == 'future':
asset = self._retrieve_futures_contract(sid)
else:
asset = None
self._asset_cache[sid] = asset
if asset is not None:
return asset
elif default_none:
return None
else:
raise SidNotFound(sid=sid)
def retrieve_all(self, sids, default_none=False):
return [self.retrieve_asset(sid) for sid in sids]
def _retrieve_equity(self, sid):
try:
return self._equity_cache[sid]
except KeyError:
pass
c = self.conn.cursor()
c.row_factory = Row
t = (int(sid),)
c.execute(EQUITY_BY_SID_QUERY, t)
data = dict(c.fetchone())
if data:
if data['start_date']:
data['start_date'] = pd.Timestamp(data['start_date'], tz='UTC')
if data['end_date']:
data['end_date'] = pd.Timestamp(data['end_date'], tz='UTC')
if data['first_traded']:
data['first_traded'] = pd.Timestamp(
data['first_traded'], tz='UTC')
equity = Equity(**data)
else:
equity = None
self._equity_cache[sid] = equity
return equity
def _retrieve_futures_contract(self, sid):
try:
return self._future_cache[sid]
except KeyError:
pass
c = self.conn.cursor()
t = (int(sid),)
c.row_factory = Row
c.execute(FUTURE_BY_SID_QUERY, t)
data = dict(c.fetchone())
if data:
if data['start_date']:
data['start_date'] = pd.Timestamp(data['start_date'], tz='UTC')
if data['end_date']:
data['end_date'] = pd.Timestamp(data['end_date'], tz='UTC')
if data['first_traded']:
data['first_traded'] = pd.Timestamp(
data['first_traded'], tz='UTC')
if data['notice_date']:
data['notice_date'] = pd.Timestamp(
data['notice_date'], tz='UTC')
if data['expiration_date']:
data['expiration_date'] = pd.Timestamp(
data['expiration_date'], tz='UTC')
future = Future(**data)
else:
future = None
self._future_cache[sid] = future
return future
def lookup_symbol_resolve_multiple(self, symbol, as_of_date=None):
"""
Return matching Asset of name symbol in database.
If multiple Assets are found and as_of_date is not set,
raises MultipleSymbolsFound.
If no Asset was active at as_of_date, and allow_expired is False
raises SymbolNotFound.
"""
if as_of_date is not None:
as_of_date = pd.Timestamp(normalize_date(as_of_date))
c = self.conn.cursor()
if as_of_date:
# If one SID exists for symbol, return that symbol
t = (symbol, as_of_date.value, as_of_date.value)
query = ("select sid from equities "
"where symbol=? "
"and start_date<=? "
"and end_date>=?")
c.execute(query, t)
candidates = c.fetchall()
if len(candidates) == 1:
return self._retrieve_equity(candidates[0][0])
# If no SID exists for symbol, return SID with the
# highest-but-not-over end_date
if len(candidates) == 0:
t = (symbol, as_of_date.value)
query = ("select sid from equities "
"where symbol=? "
"and start_date<=? "
"order by end_date desc "
"limit 1")
c.execute(query, t)
data = c.fetchone()
if data:
return self._retrieve_equity(data[0])
# If multiple SIDs exist for symbol, return latest start_date with
# end_date as a tie-breaker
if len(candidates) > 1:
t = (symbol, as_of_date.value)
query = ("select sid from equities "
"where symbol=? " +
"and start_date<=? " +
"order by start_date desc, end_date desc " +
"limit 1")
c.execute(query, t)
data = c.fetchone()
if data:
return self._retrieve_equity(data[0])
raise SymbolNotFound(symbol=symbol)
else:
t = (symbol,)
query = ("select sid from equities where symbol=?")
c.execute(query, t)
data = c.fetchall()
if len(data) == 1:
return self._retrieve_equity(data[0][0])
elif not data:
raise SymbolNotFound(symbol=symbol)
else:
options = []
for row in data:
sid = row[0]
asset = self._retrieve_equity(sid)
options.append(asset)
raise MultipleSymbolsFound(symbol=symbol,
options=options)
def lookup_symbol(self, symbol, as_of_date, fuzzy=False):
"""
If a fuzzy string is provided, then we try various symbols based on
the provided symbol. This is to facilitate mapping from a broker's
symbol to ours in cases where mapping to the broker's symbol loses
information. For example, if we have CMCS_A, but a broker has CMCSA,
when the broker provides CMCSA, it can also provide fuzzy='_',
so we can find a match by inserting an underscore.
"""
symbol = symbol.upper()
as_of_date = normalize_date(as_of_date)
if not fuzzy:
try:
return self.lookup_symbol_resolve_multiple(symbol, as_of_date)
except SymbolNotFound:
return None
else:
c = self.conn.cursor()
fuzzy = symbol.replace(self.fuzzy_char, '')
t = (fuzzy, as_of_date.value, as_of_date.value)
query = ("select sid from equities "
"where fuzzy=? " +
"and start_date<=? " +
"and end_date>=?")
c.execute(query, t)
candidates = c.fetchall()
# If one SID exists for symbol, return that symbol
if len(candidates) == 1:
return self._retrieve_equity(candidates[0][0])
# If multiple SIDs exist for symbol, return latest start_date with
# end_date as a tie-breaker
if len(candidates) > 1:
t = (symbol, as_of_date.value)
query = ("select sid from equities "
"where symbol=? " +
"and start_date<=? " +
"order by start_date desc, end_date desc" +
"limit 1")
c.execute(query, t)
data = c.fetchone()
if data:
return self._retrieve_equity(data[0])
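    # Illustrative fuzzy lookup (hypothetical data, not part of the original
    # module): with fuzzy_char='_', an equity stored as 'CMCS_A' can be found
    # from the broker-style symbol 'CMCSA':
    #
    #     finder = AssetFinder(metadata, fuzzy_char='_')
    #     asset = finder.lookup_symbol('CMCSA', pd.Timestamp('2014-01-02'),
    #                                  fuzzy=True)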
def lookup_future_chain(self, root_symbol, as_of_date, knowledge_date):
""" Return the futures chain for a given root symbol.
Parameters
----------
root_symbol : str
Root symbol of the desired future.
as_of_date : pd.Timestamp or pd.NaT
Date at which the chain determination is rooted. I.e. the
existing contract whose notice date is first after this
date is the primary contract, etc. If NaT is given, the
chain is unbounded, and all contracts for this root symbol
are returned.
knowledge_date : pd.Timestamp or pd.NaT
Date for determining which contracts exist for inclusion in
this chain. Contracts exist only if they have a start_date
on or before this date. If NaT is given and as_of_date is
is not NaT, the value of as_of_date is used for
knowledge_date.
Returns
-------
list
A list of Future objects, the chain for the given
parameters.
Raises
------
RootSymbolNotFound
Raised when a future chain could not be found for the given
root symbol.
"""
c = self.conn.cursor()
if as_of_date is pd.NaT:
# If the as_of_date is NaT, get all contracts for this
# root symbol.
t = {'root_symbol': root_symbol}
c.execute("""
select sid from futures
where root_symbol=:root_symbol
order by notice_date asc
""", t)
else:
if knowledge_date is pd.NaT:
# If knowledge_date is NaT, default to using as_of_date
t = {'root_symbol': root_symbol,
'as_of_date': as_of_date.value,
'knowledge_date': as_of_date.value}
else:
t = {'root_symbol': root_symbol,
'as_of_date': as_of_date.value,
'knowledge_date': knowledge_date.value}
c.execute("""
select sid from futures
where root_symbol=:root_symbol
and :as_of_date < notice_date
and start_date <= :knowledge_date
order by notice_date asc
""", t)
sids = [r[0] for r in c.fetchall()]
if not sids:
# Check if root symbol exists.
c.execute("""
select count(sid) from futures where root_symbol=:root_symbol
""", t)
count = c.fetchone()[0]
if count == 0:
raise RootSymbolNotFound(root_symbol=root_symbol)
else:
# If symbol exists, return empty future chain.
return []
return [self._retrieve_futures_contract(sid) for sid in sids]
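    # Illustrative call (hypothetical root symbol and dates, not part of the
    # original module): contracts known on knowledge_date whose notice date is
    # after as_of_date, nearest notice date first:
    #
    #     chain = finder.lookup_future_chain(
    #         'CL',
    #         as_of_date=pd.Timestamp('2014-01-02', tz='UTC'),
    #         knowledge_date=pd.Timestamp('2014-01-01', tz='UTC'))
    #     # chain[0] is the primary contract, chain[1] the secondary, ...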
@property
def sids(self):
c = self.conn.cursor()
query = 'select sid from asset_router'
c.execute(query)
return [r[0] for r in c.fetchall()]
def _lookup_generic_scalar(self,
asset_convertible,
as_of_date,
matches,
missing):
"""
Convert asset_convertible to an asset.
On success, append to matches.
On failure, append to missing.
"""
if isinstance(asset_convertible, Asset):
matches.append(asset_convertible)
elif isinstance(asset_convertible, Integral):
try:
result = self.retrieve_asset(int(asset_convertible))
except SidNotFound:
missing.append(asset_convertible)
return None
matches.append(result)
elif isinstance(asset_convertible, string_types):
try:
matches.append(
self.lookup_symbol_resolve_multiple(
asset_convertible,
as_of_date,
)
)
except SymbolNotFound:
missing.append(asset_convertible)
return None
else:
raise NotAssetConvertible(
"Input was %s, not AssetConvertible."
% asset_convertible
)
def lookup_generic(self,
asset_convertible_or_iterable,
as_of_date):
"""
Convert a AssetConvertible or iterable of AssetConvertibles into
a list of Asset objects.
This method exists primarily as a convenience for implementing
user-facing APIs that can handle multiple kinds of input. It should
not be used for internal code where we already know the expected types
of our inputs.
Returns a pair of objects, the first of which is the result of the
conversion, and the second of which is a list containing any values
that couldn't be resolved.
"""
matches = []
missing = []
# Interpret input as scalar.
if isinstance(asset_convertible_or_iterable, AssetConvertible):
self._lookup_generic_scalar(
asset_convertible=asset_convertible_or_iterable,
as_of_date=as_of_date,
matches=matches,
missing=missing,
)
try:
return matches[0], missing
except IndexError:
if hasattr(asset_convertible_or_iterable, '__int__'):
raise SidNotFound(sid=asset_convertible_or_iterable)
else:
raise SymbolNotFound(symbol=asset_convertible_or_iterable)
# Interpret input as iterable.
try:
iterator = iter(asset_convertible_or_iterable)
except TypeError:
raise NotAssetConvertible(
"Input was not a AssetConvertible "
"or iterable of AssetConvertible."
)
for obj in iterator:
self._lookup_generic_scalar(obj, as_of_date, matches, missing)
return matches, missing
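    # Illustrative behaviour (hypothetical inputs, not part of the original
    # module): a scalar input yields a single Asset, an iterable yields a
    # list, and anything unresolvable is collected in the second return value:
    #
    #     asset, missing = finder.lookup_generic('AAPL', as_of_date)
    #     assets, missing = finder.lookup_generic([24, 'AAPL', 'BOGUS'], as_of_date)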
def map_identifier_index_to_sids(self, index, as_of_date):
"""
This method is for use in sanitizing a user's DataFrame or Panel
inputs.
Takes the given index of identifiers, checks their types, builds assets
if necessary, and returns a list of the sids that correspond to the
input index.
Parameters
        ----------
index : Iterable
An iterable containing ints, strings, or Assets
as_of_date : pandas.Timestamp
A date to be used to resolve any dual-mapped symbols
Returns
        -------
List
A list of integer sids corresponding to the input index
"""
# This method assumes that the type of the objects in the index is
# consistent and can, therefore, be taken from the first identifier
first_identifier = index[0]
# Ensure that input is AssetConvertible (integer, string, or Asset)
if not isinstance(first_identifier, AssetConvertible):
raise MapAssetIdentifierIndexError(obj=first_identifier)
# If sids are provided, no mapping is necessary
if isinstance(first_identifier, Integral):
return index
# If symbols or Assets are provided, construction and mapping is
# necessary
self.consume_identifiers(index)
# Look up all Assets for mapping
matches = []
missing = []
for identifier in index:
self._lookup_generic_scalar(identifier, as_of_date,
matches, missing)
# Handle missing assets
if len(missing) > 0:
warnings.warn("Missing assets for identifiers: " + missing)
# Return a list of the sids of the found assets
return [asset.sid for asset in matches]
def _insert_metadata(self, identifier, **kwargs):
"""
Inserts the given metadata kwargs to the entry for the given
identifier. Matching fields in the existing entry will be overwritten.
:param identifier: The identifier for which to insert metadata
:param kwargs: The keyed metadata to insert
"""
if identifier in self.metadata_cache:
# Multiple pass insertion no longer supported.
# This could and probably should raise an Exception, but is
# currently just a short-circuit for compatibility with existing
# testing structure in the test_algorithm module which creates
# multiple sources which all insert redundant metadata.
return
entry = {}
for key, value in kwargs.items():
# Do not accept invalid fields
if key not in ASSET_FIELDS:
continue
# Do not accept Nones
if value is None:
continue
# Do not accept empty strings
if value == '':
continue
# Do not accept nans from dataframes
if isinstance(value, float) and np.isnan(value):
continue
entry[key] = value
# Check if the sid is declared
try:
entry['sid']
except KeyError:
# If the identifier is not a sid, assign one
if hasattr(identifier, '__int__'):
entry['sid'] = identifier.__int__()
else:
if self.allow_sid_assignment:
# Assign the sid the value of its insertion order.
# This assumes that we are assigning values to all assets.
entry['sid'] = len(self.metadata_cache)
else:
raise SidAssignmentError(identifier=identifier)
# If the file_name is in the kwargs, it will be used as the symbol
try:
entry['symbol'] = entry.pop('file_name')
except KeyError:
pass
# If the identifier coming in was a string and there is no defined
# symbol yet, set the symbol to the incoming identifier
try:
entry['symbol']
pass
except KeyError:
if isinstance(identifier, string_types):
entry['symbol'] = identifier
# If the company_name is in the kwargs, it may be the asset_name
try:
company_name = entry.pop('company_name')
try:
entry['asset_name']
except KeyError:
entry['asset_name'] = company_name
except KeyError:
pass
# If dates are given as nanos, pop them
try:
entry['start_date'] = entry.pop('start_date_nano')
except KeyError:
pass
try:
entry['end_date'] = entry.pop('end_date_nano')
except KeyError:
pass
try:
entry['notice_date'] = entry.pop('notice_date_nano')
except KeyError:
pass
try:
entry['expiration_date'] = entry.pop('expiration_date_nano')
except KeyError:
pass
# Process dates to Timestamps
try:
entry['start_date'] = pd.Timestamp(entry['start_date'], tz='UTC')
except KeyError:
# Set a default start_date of the EPOCH, so that all date queries
# work when a start date is not provided.
entry['start_date'] = pd.Timestamp(0, tz='UTC')
try:
# Set a default end_date of 'now', so that all date queries
# work when a end date is not provided.
entry['end_date'] = pd.Timestamp(entry['end_date'], tz='UTC')
except KeyError:
entry['end_date'] = self.end_date_to_assign
try:
entry['notice_date'] = pd.Timestamp(entry['notice_date'],
tz='UTC')
except KeyError:
pass
try:
entry['expiration_date'] = pd.Timestamp(entry['expiration_date'],
tz='UTC')
except KeyError:
pass
# Build an Asset of the appropriate type, default to Equity
asset_type = entry.pop('asset_type', 'equity')
if asset_type.lower() == 'equity':
try:
fuzzy = entry['symbol'].replace(self.fuzzy_char, '') \
if self.fuzzy_char else None
except KeyError:
fuzzy = None
asset = Equity(**entry)
c = self.conn.cursor()
t = (asset.sid,
asset.symbol,
asset.asset_name,
asset.start_date.value if asset.start_date else None,
asset.end_date.value if asset.end_date else None,
asset.first_traded.value if asset.first_traded else None,
asset.exchange,
fuzzy)
c.execute("""INSERT INTO equities(
sid,
symbol,
asset_name,
start_date,
end_date,
first_traded,
exchange,
fuzzy)
VALUES(?, ?, ?, ?, ?, ?, ?, ?)""", t)
t = (asset.sid,
'equity')
c.execute("""INSERT INTO asset_router(sid, asset_type)
VALUES(?, ?)""", t)
elif asset_type.lower() == 'future':
asset = Future(**entry)
c = self.conn.cursor()
t = (asset.sid,
asset.symbol,
asset.asset_name,
asset.start_date.value if asset.start_date else None,
asset.end_date.value if asset.end_date else None,
asset.first_traded.value if asset.first_traded else None,
asset.exchange,
asset.root_symbol,
asset.notice_date.value if asset.notice_date else None,
asset.expiration_date.value
if asset.expiration_date else None,
asset.contract_multiplier)
c.execute("""INSERT INTO futures(
sid,
symbol,
asset_name,
start_date,
end_date,
first_traded,
exchange,
root_symbol,
notice_date,
expiration_date,
contract_multiplier)
VALUES(?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)""", t)
t = (asset.sid,
'future')
c.execute("""INSERT INTO asset_router(sid, asset_type)
VALUES(?, ?)""", t)
else:
raise InvalidAssetType(asset_type=asset_type)
self.metadata_cache[identifier] = entry
def consume_identifiers(self, identifiers):
"""
Consumes the given identifiers in to the metadata cache of this
AssetFinder.
"""
for identifier in identifiers:
# Handle case where full Assets are passed in
# For example, in the creation of a DataFrameSource, the source's
# 'sid' args may be full Assets
if isinstance(identifier, Asset):
sid = identifier.sid
metadata = identifier.to_dict()
metadata['asset_type'] = identifier.__class__.__name__
self.insert_metadata(identifier=sid, **metadata)
else:
self.insert_metadata(identifier)
def consume_metadata(self, metadata):
"""
Consumes the provided metadata in to the metadata cache. The
existing values in the cache will be overwritten when there
is a conflict.
:param metadata: The metadata to be consumed
"""
# Handle dicts
if isinstance(metadata, dict):
self._insert_metadata_dict(metadata)
# Handle DataFrames
elif isinstance(metadata, pd.DataFrame):
self._insert_metadata_dataframe(metadata)
# Handle readables
elif hasattr(metadata, 'read'):
self._insert_metadata_readable(metadata)
else:
raise ConsumeAssetMetaDataError(obj=metadata)
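    # Illustrative metadata payload (hypothetical values, not part of the
    # original module) accepted by consume_metadata:
    #
    #     finder.consume_metadata({
    #         24: {'symbol': 'AAPL', 'asset_type': 'equity'},
    #         25: {'symbol': 'CLX14', 'asset_type': 'future', 'root_symbol': 'CL',
    #              'contract_multiplier': 1000},
    #     })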
def clear_metadata(self):
"""
Used for testing.
"""
self.metadata_cache = {}
self.conn = sqlite3.connect(':memory:')
self.create_db_tables()
def insert_metadata(self, identifier, **kwargs):
self._insert_metadata(identifier, **kwargs)
self.conn.commit()
def _insert_metadata_dataframe(self, dataframe):
for identifier, row in dataframe.iterrows():
self._insert_metadata(identifier, **row)
self.conn.commit()
def _insert_metadata_dict(self, dict):
for identifier, entry in dict.items():
self._insert_metadata(identifier, **entry)
self.conn.commit()
def _insert_metadata_readable(self, readable):
for row in readable.read():
# Parse out the row of the readable object
metadata_dict = {}
for field in ASSET_FIELDS:
try:
row_value = row[field]
# Avoid passing placeholders
if row_value and (row_value != 'None'):
metadata_dict[field] = row[field]
except KeyError:
continue
except IndexError:
continue
# Locate the identifier, fail if not found
if 'sid' in metadata_dict:
identifier = metadata_dict['sid']
elif 'symbol' in metadata_dict:
identifier = metadata_dict['symbol']
else:
raise ConsumeAssetMetaDataError(obj=row)
self._insert_metadata(identifier, **metadata_dict)
self.conn.commit()
def _compute_asset_lifetimes(self):
"""
        Compute and cache a recarray of asset lifetimes.
FUTURE OPTIMIZATION: We're looping over a big array, which means this
probably should be in C/Cython.
"""
with self.conn as transaction:
results = transaction.execute(
'SELECT sid, start_date, end_date from equities'
).fetchall()
lifetimes = np.recarray(
shape=(len(results),),
dtype=[('sid', 'i8'), ('start', 'i8'), ('end', 'i8')],
)
# TODO: This is **WAY** slower than it could be because we have to
# check for None everywhere. If we represented "no start date" as
# 0, and "no end date" as MAX_INT in our metadata, this would be
# significantly faster.
NO_START = 0
NO_END = np.iinfo(int).max
for idx, (sid, start, end) in enumerate(results):
lifetimes[idx] = (
sid,
start if start is not None else NO_START,
end if end is not None else NO_END,
)
return lifetimes
def lifetimes(self, dates):
"""
Compute a DataFrame representing asset lifetimes for the specified date
range.
Parameters
----------
dates : pd.DatetimeIndex
The dates for which to compute lifetimes.
Returns
-------
lifetimes : pd.DataFrame
A frame of dtype bool with `dates` as index and an Int64Index of
assets as columns. The value at `lifetimes.loc[date, asset]` will
            be True iff `asset` existed on `date`.
See Also
--------
numpy.putmask
"""
# This is a less than ideal place to do this, because if someone adds
# assets to the finder after we've touched lifetimes we won't have
# those new assets available. Mutability is not my favorite
# programming feature.
if self._asset_lifetimes is None:
self._asset_lifetimes = self._compute_asset_lifetimes()
lifetimes = self._asset_lifetimes
raw_dates = dates.asi8[:, None]
mask = (lifetimes.start <= raw_dates) & (raw_dates <= lifetimes.end)
return pd.DataFrame(mask, index=dates, columns=lifetimes.sid)
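    # Illustrative use (hypothetical dates and sid, not part of the original
    # module):
    #
    #     dates = pd.date_range('2014-01-01', '2014-01-10', tz='UTC')
    #     alive = finder.lifetimes(dates)
    #     # alive.loc[dates[3], 24] is True iff asset 24 existed on that date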
class AssetConvertible(with_metaclass(ABCMeta)):
"""
ABC for types that are convertible to integer-representations of
Assets.
Includes Asset, six.string_types, and Integral
"""
pass
AssetConvertible.register(Integral)
AssetConvertible.register(Asset)
# Use six.string_types for Python2/3 compatibility
for _type in string_types:
AssetConvertible.register(_type)
class NotAssetConvertible(ValueError):
pass
| apache-2.0 | 2,541,197,964,576,941,000 | 33.292779 | 79 | 0.542631 | false |
saurabh6790/aimobilize-lib-backup | webnotes/modules/utils.py | 34 | 4137 | # Copyright (c) 2013, Web Notes Technologies Pvt. Ltd. and Contributors
# MIT License. See license.txt
from __future__ import unicode_literals
import webnotes, os
import webnotes.modules
from webnotes.utils import cstr
from webnotes.modules import export_doc, get_module_path, scrub
def listfolders(path, only_name=0):
"""
Returns the list of folders (with paths) in the given path,
If only_name is set, it returns only the folder names
"""
out = []
for each in os.listdir(path):
each = cstr(each)
dirname = each.split(os.path.sep)[-1]
fullpath = os.path.join(path, dirname)
if os.path.isdir(fullpath) and not dirname.startswith('.'):
out.append(only_name and dirname or fullpath)
return out
def switch_module(dt, dn, to, frm=None, export=None):
"""
Change the module of the given doctype, if export is true, then also export txt and copy
code files from src
"""
webnotes.conn.sql("update `tab"+dt+"` set module=%s where name=%s", (to, dn))
if export:
export_doc(dt, dn)
# copy code files
if dt in ('DocType', 'Page', 'Report'):
from_path = os.path.join(get_module_path(frm), scrub(dt), scrub(dn), scrub(dn))
to_path = os.path.join(get_module_path(to), scrub(dt), scrub(dn), scrub(dn))
		# make dir if it does not exist
os.system('mkdir -p %s' % os.path.join(get_module_path(to), scrub(dt), scrub(dn)))
for ext in ('py','js','html','css'):
			os.system('cp %s.%s %s.%s' % (from_path, ext, to_path, ext))
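# Illustrative call (hypothetical doctype and module names, not part of the
# original code): move a DocType to the 'selling' module and re-export it:
#
#     switch_module('DocType', 'Sales Note', to='selling', frm='accounts', export=1)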
def commonify_doclist(doclist, with_comments=1):
"""
Makes a doclist more readable by extracting common properties.
This is used for printing Documents in files
"""
from webnotes.utils import get_common_dict, get_diff_dict
def make_common(doclist):
c = {}
if with_comments:
c['##comment'] = 'These values are common in all dictionaries'
for k in common_keys:
c[k] = doclist[0][k]
return c
def strip_common_and_idx(d):
for k in common_keys:
if k in d: del d[k]
if 'idx' in d: del d['idx']
return d
def make_common_dicts(doclist):
common_dict = {} # one per doctype
# make common dicts for all records
for d in doclist:
if not d['doctype'] in common_dict:
d1 = d.copy()
if d1.has_key("name"):
del d1['name']
common_dict[d['doctype']] = d1
else:
common_dict[d['doctype']] = get_common_dict(common_dict[d['doctype']], d)
return common_dict
common_keys = ['owner','docstatus','creation','modified','modified_by']
common_dict = make_common_dicts(doclist)
# make docs
final = []
for d in doclist:
f = strip_common_and_idx(get_diff_dict(common_dict[d['doctype']], d))
f['doctype'] = d['doctype'] # keep doctype!
# strip name for child records (only an auto generated number!)
if f['doctype'] != doclist[0]['doctype'] and f.has_key("name"):
del f['name']
if with_comments:
f['##comment'] = d['doctype'] + ('name' in f and (', ' + f['name']) or '')
final.append(f)
# add commons
commons = []
for d in common_dict.values():
d['name']='__common__'
if with_comments:
d['##comment'] = 'These values are common for all ' + d['doctype']
commons.append(strip_common_and_idx(d))
common_values = make_common(doclist)
return [common_values]+commons+final
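# Sketch of the transformation (hypothetical two-record doclist, not part of
# the original code), assuming the usual owner/docstatus/creation/modified
# fields are present on every record:
#
#     commonify_doclist(notes, with_comments=0)
#     # -> [{'owner': 'Administrator', 'docstatus': 0, ...},          # shared by all records
#     #     {'doctype': 'Note', 'name': '__common__', ...},           # shared by all Notes
#     #     {'doctype': 'Note', 'name': 'N-0001', 'title': 'First'},  # per-record diffs
#     #     {'doctype': 'Note', 'name': 'N-0002', 'title': 'Second'}]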
def uncommonify_doclist(dl):
"""
Expands an commonified doclist
"""
# first one has common values
common_values = dl[0]
common_dict = webnotes._dict()
final = []
idx_dict = {}
for d in dl[1:]:
if 'name' in d and d['name']=='__common__':
# common for a doctype -
del d['name']
common_dict[d['doctype']] = d
else:
dt = d['doctype']
if not dt in idx_dict: idx_dict[dt] = 1;
d1 = webnotes._dict(common_values.copy())
# update from common and global
d1.update(common_dict[dt])
d1.update(d)
# idx by sequence
d1['idx'] = idx_dict[dt]
# increment idx
idx_dict[dt] += 1
final.append(d1)
return final
def pprint_doclist(doclist, with_comments = 1):
from json import dumps
return dumps(commonify_doclist(doclist, False), indent=1, sort_keys=True)
def peval_doclist(txt):
from json import loads
try:
return uncommonify_doclist(loads(txt))
except Exception, e:
return uncommonify_doclist(eval(txt))
| mit | -6,476,806,167,198,949,000 | 25.519231 | 90 | 0.656514 | false |
drkitty/metatube | data.py | 1 | 11578 | from __future__ import unicode_literals
import dateutil.parser
import os
import os.path
import subprocess
from itertools import islice
from sys import stderr
import oursql
import requests
from sqlalchemy import (
create_engine, Boolean, Column, DateTime, ForeignKey, Integer, String,
Text
)
from sqlalchemy.orm import relationship, sessionmaker
from sqlalchemy.ext.declarative import declarative_base
import settings
from client import authentication_request_url, GoogleAPIClient
CHUNK_SIZE = 100000
def utf8mb4_connect(**kwargs):
connection = oursql.Connection(
host=settings.database['host'], user=settings.database['user'],
db=settings.database['db'], **kwargs)
cursor = connection.cursor()
cursor.execute("SET NAMES 'utf8mb4' COLLATE 'utf8mb4_bin'")
return connection
engine = create_engine('mysql+oursql://metatube@localhost/metatubedb',
echo=settings.debug, encoding=b'utf_8',
creator=utf8mb4_connect)
Base = declarative_base()
Session = sessionmaker(bind=engine)
class EverythingManager(object):
def __init__(self):
self.api_client = GoogleAPIClient()
if self.api_client.access_token is None:
print ('Open the following URL in your Web browser and grant '
'metatube read-only access to your account.')
print authentication_request_url
print
print 'Then enter the authorization code here:'
code = raw_input('> ')
self.api_client.get_token_pair(code)
print
self.session = Session()
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, traceback):
if exc_type is None:
self.session.commit()
else:
self.session.rollback()
self.session.close()
class Video(Base):
__tablename__ = 'video'
id = Column(String(16), primary_key=True)
title = Column(String(400), nullable=False) # TODO: Is this right?
description = Column(Text)
date_published = Column(DateTime, nullable=False)
channel_id = Column(String(40), ForeignKey('channel.id'), nullable=False)
playlist_videos = relationship('PlaylistVideo', backref='video')
video_downloaded = Column(Boolean, nullable=False, default=False)
thumbnail_downloaded = Column(Boolean, nullable=False, default=False)
skip = Column(Boolean, nullable=False, default=False)
def __repr__(self):
return '<Video: "{}">'.format(self.title.encode('ascii', 'replace'))
def download_video(self, mgr):
try:
if os.path.getsize('dl/' + self.id) != 0:
self.video_downloaded = True
mgr.session.commit()
return
except OSError as e:
if e.errno != 2: # 'No such file or directory'
raise
p = subprocess.Popen(
('youtube-dl', '-g', 'https://www.youtube.com/watch?v=' + self.id),
stdout=subprocess.PIPE)
url, _ = p.communicate()
url = url.strip()
if p.returncode != 0:
stderr.write('youtube-dl failed with error code {}\n'.format(
p.returncode))
return
with open('temp', 'w') as f:
for chunk in requests.get(url, stream=True).iter_content(
CHUNK_SIZE):
f.write(chunk)
try:
os.mkdir('dl')
except OSError as e:
if e.errno != 17: # 'File exists'
raise
os.rename('temp', 'dl/' + self.id)
self.video_downloaded = True
mgr.session.commit()
def download_thumbnail(self, mgr):
try:
if os.path.getsize('thumbnails/' + self.id) != 0:
self.thumbnail_downloaded = True
mgr.session.commit()
return
except OSError as e:
if e.errno != 2: # 'No such file or directory'
raise
def process_video(item):
url = item['snippet']['thumbnails']['high']['url']
with open('temp', 'w') as f:
for chunk in requests.get(url, stream=True).iter_content(
CHUNK_SIZE):
f.write(chunk)
try:
os.mkdir('thumbnails')
except OSError as e:
if e.errno != 17: # 'File exists'
raise
os.rename('temp', 'thumbnails/' + self.id)
try:
mgr.api_client.get('/videos', {
'part': 'snippet',
'id': self.id,
'fields': 'items/snippet/thumbnails',
}, process_video)
except Exception as e:
stderr.write('Could not download thumbnail.\n')
stderr.write('Original exception: {}\n'.format(e))
return
self.thumbnail_downloaded = True
mgr.session.commit()
class Playlist(Base):
__tablename__ = 'playlist'
id = Column(String(40), primary_key=True)
title = Column(String(60), nullable=False)
description = Column(Text)
channel_id = Column(String(40), ForeignKey('channel.id'), nullable=False)
playlist_videos = relationship('PlaylistVideo', backref='playlist')
def __repr__(self):
return '<Playlist: "{}">'.format(self.title.encode('ascii', 'replace'))
@classmethod
def fetch_playlists(cls, mgr, ids):
def process_playlist(item):
snippet = item['snippet']
Channel.fetch_channels(mgr, ids=(snippet['channelId'],))
mgr.session.merge(Playlist(
id=item['id'],
title=snippet['title'],
description=snippet['description'],
channel_id=snippet['channelId'],
))
mgr.api_client.get('/playlists', {
'part': 'snippet',
'id': ','.join(ids),
}, process_playlist)
def fetch_playlist_videos(self, mgr):
video_ids = []
playlist_videos = []
def process_playlist_item(item):
snippet = item['snippet']
video_id = snippet['resourceId']['videoId']
video_ids.append(video_id)
playlist_videos.append(PlaylistVideo(
video_id=video_id,
playlist_id=self.id,
position=snippet['position'],
))
mgr.api_client.get('/playlistItems', {
'part': 'snippet',
'fields': 'items/snippet(position,resourceId),nextPageToken',
'playlistId': self.id,
}, process_playlist_item)
fetched_video_ids = []
def process_video(item):
snippet = item['snippet']
Channel.fetch_channels(mgr, ids=(snippet['channelId'],))
v = Video(
id=item['id'],
title=snippet['title'],
description=snippet['description'],
date_published=dateutil.parser.parse(
snippet['publishedAt'].rstrip('Z')),
channel_id=snippet['channelId'],
)
mgr.session.merge(v)
fetched_video_ids.append(item['id'])
video_ids = iter(video_ids)
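        # Look up the collected ids in batches; the API typically caps the
        # number of ids accepted per /videos request (50 here), so the list
        # is consumed in slices of that size.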
while True:
video_id_chunk = list(islice(video_ids, 50)) # take 50
if not video_id_chunk:
break
mgr.api_client.get('/videos', {
'part': 'id,snippet',
'id': ','.join(video_id_chunk),
'fields': 'items(id,snippet),nextPageToken',
}, process_video)
for playlist_video in playlist_videos:
# If a video is removed, sometimes its information can be accessed
# via the /playlistItems endpoint even if it's not accessible via
# the /videos endpoint.
if playlist_video.video_id in fetched_video_ids:
mgr.session.merge(playlist_video)
class PlaylistVideo(Base):
__tablename__ = 'playlistvideo'
video_id = Column(
String(16), ForeignKey('video.id', ondelete='CASCADE'),
primary_key=True)
playlist_id = Column(
String(40), ForeignKey('playlist.id', ondelete='CASCADE'),
primary_key=True)
position = Column(Integer, autoincrement=False, primary_key=True)
class Channel(Base):
__tablename__ = 'channel'
id = Column(String(40), primary_key=True)
title = Column(String(200), nullable=False)
description = Column(Text)
mine = Column(Boolean, nullable=False, default=False)
tracked = Column(Boolean, nullable=False, default=False)
playlists = relationship('Playlist', backref='channel')
videos = relationship('Video', backref='channel')
fetched = set()
def __repr__(self):
return '<Channel: "{}">'.format(self.title.encode('ascii', 'replace'))
@classmethod
def fetch_channels(cls, mgr, ids=(), username=None, track=None):
get_mine = bool(not ids and not username)
ids = filter(lambda id: id not in cls.fetched, ids)
if not ids and not username and not get_mine:
return
if ids and username:
raise Exception(
'You cannot call this method with both `ids` and `username`')
def process_channel(item):
snippet = item['snippet']
cls.fetched.add(item['id'])
old = mgr.session.query(Channel).get(item['id'])
if old is not None:
tracked = old.tracked if track is None else track
new_mine = old.mine
else:
tracked = False if track is None else track
new_mine = get_mine
mgr.session.merge(Channel(
id=item['id'],
title=snippet['title'],
description=snippet['description'],
mine=new_mine,
tracked=tracked,
))
params = {
'part': 'id,snippet',
'fields': 'items(id,snippet(title,description)),nextPageToken'
}
if get_mine:
params['mine'] = 'true'
elif ids:
params['id'] = ','.join(ids)
elif username:
params['forUsername'] = username
else:
raise Exception('This should never happen')
mgr.api_client.get('/channels', params, process_channel)
def find_playlists(self, mgr):
playlists = []
def process_playlist(item):
snippet = item['snippet']
playlists.append({
'id': item['id'],
'title': item['snippet']['title'],
})
def process_channel(item):
special_playlists = item['contentDetails']['relatedPlaylists']
playlist_ids = special_playlists.itervalues()
mgr.api_client.get('/playlists', {
'part': 'snippet',
'id': ','.join(playlist_ids),
'fields': 'items(id,snippet/title),nextPageToken',
}, process_playlist)
# Find normal playlists.
mgr.api_client.get('/playlists', {
'part': 'snippet',
'channelId': self.id,
'fields': 'items(id,snippet/title),nextPageToken',
}, process_playlist)
# Find special playlists.
mgr.api_client.get('/channels', {
'part': 'contentDetails',
'id': self.id,
'fields': 'items/contentDetails/relatedPlaylists,nextPageToken',
}, process_channel)
return playlists
| mit | 7,415,975,497,859,556,000 | 31.340782 | 79 | 0.553463 | false |
pra85/calibre | src/calibre/gui2/dialogs/choose_plugin_toolbars.py | 9 | 2372 | #!/usr/bin/env python
# vim:fileencoding=UTF-8:ts=4:sw=4:sta:et:sts=4:ai
from __future__ import (unicode_literals, division, absolute_import,
print_function)
__copyright__ = '2008, Kovid Goyal [email protected]'
__docformat__ = 'restructuredtext en'
__license__ = 'GPL v3'
from PyQt4.Qt import (QDialog, QVBoxLayout, QLabel, QDialogButtonBox,
QListWidget, QAbstractItemView)
from PyQt4 import QtGui
class ChoosePluginToolbarsDialog(QDialog):
def __init__(self, parent, plugin, locations):
QDialog.__init__(self, parent)
self.locations = locations
self.setWindowTitle(
_('Add "%s" to toolbars or menus')%plugin.name)
self._layout = QVBoxLayout(self)
self.setLayout(self._layout)
self._header_label = QLabel(
_('Select the toolbars and/or menus to add <b>%s</b> to:') %
plugin.name)
self._layout.addWidget(self._header_label)
self._locations_list = QListWidget(self)
self._locations_list.setSelectionMode(QAbstractItemView.MultiSelection)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Preferred,
QtGui.QSizePolicy.Minimum)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
self._locations_list.setSizePolicy(sizePolicy)
for key, text in locations:
self._locations_list.addItem(text)
if key in {'toolbar', 'toolbar-device'}:
self._locations_list.item(self._locations_list.count()-1
).setSelected(True)
self._layout.addWidget(self._locations_list)
self._footer_label = QLabel(
_('You can also customise the plugin locations '
'using <b>Preferences -> Customise the toolbar</b>'))
self._layout.addWidget(self._footer_label)
button_box = QDialogButtonBox(QDialogButtonBox.Ok |
QDialogButtonBox.Cancel)
button_box.accepted.connect(self.accept)
button_box.rejected.connect(self.reject)
self._layout.addWidget(button_box)
self.resize(self.sizeHint())
def selected_locations(self):
selected = []
for row in self._locations_list.selectionModel().selectedRows():
selected.append(self.locations[row.row()])
return selected
| gpl-3.0 | -5,796,854,673,159,429,000 | 36.0625 | 79 | 0.631113 | false |
motord/Motorcycle-Diaries | lib/gdata/contacts/client.py | 7 | 19998 | #!/usr/bin/env python
#
# Copyright (C) 2009 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from types import ListType, DictionaryType
"""Contains a client to communicate with the Contacts servers.
For documentation on the Contacts API, see:
http://code.google.com/apis/contacts/
"""
__author__ = '[email protected] (Vince Spicer)'
import gdata.client
import gdata.contacts.data
import atom.data
import atom.http_core
import gdata.gauth
class ContactsClient(gdata.client.GDClient):
api_version = '3'
auth_service = 'cp'
server = "www.google.com"
contact_list = "default"
auth_scopes = gdata.gauth.AUTH_SCOPES['cp']
def __init__(self, domain=None, auth_token=None, **kwargs):
"""Constructs a new client for the Email Settings API.
Args:
domain: string The Google Apps domain (if any).
kwargs: The other parameters to pass to the gdata.client.GDClient
constructor.
"""
gdata.client.GDClient.__init__(self, auth_token=auth_token, **kwargs)
self.domain = domain
def get_feed_uri(self, kind='contacts', contact_list=None, projection='full',
scheme="http"):
"""Builds a feed URI.
Args:
kind: The type of feed to return, typically 'groups' or 'contacts'.
Default value: 'contacts'.
contact_list: The contact list to return a feed for.
Default value: self.contact_list.
projection: The projection to apply to the feed contents, for example
'full', 'base', 'base/12345', 'full/batch'. Default value: 'full'.
scheme: The URL scheme such as 'http' or 'https', None to return a
relative URI without hostname.
Returns:
A feed URI using the given kind, contact list, and projection.
Example: '/m8/feeds/contacts/default/full'.
"""
contact_list = contact_list or self.contact_list
if kind == 'profiles':
contact_list = 'domain/%s' % self.domain
prefix = scheme and '%s://%s' % (scheme, self.server) or ''
return '%s/m8/feeds/%s/%s/%s' % (prefix, kind, contact_list, projection)
GetFeedUri = get_feed_uri
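  # Illustrative examples derived from the defaults above (not in the original
  # source):
  #   client.get_feed_uri()
  #       -> 'http://www.google.com/m8/feeds/contacts/default/full'
  #   client.get_feed_uri(kind='groups', scheme=None)
  #       -> '/m8/feeds/groups/default/full'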
def get_contact(self, uri, desired_class=gdata.contacts.data.ContactEntry,
auth_token=None, **kwargs):
return self.get_feed(uri, auth_token=auth_token,
desired_class=desired_class, **kwargs)
GetContact = get_contact
def create_contact(self, new_contact, insert_uri=None, auth_token=None, **kwargs):
"""Adds an new contact to Google Contacts.
Args:
new_contact: atom.Entry or subclass A new contact which is to be added to
Google Contacts.
insert_uri: the URL to post new contacts to the feed
url_params: dict (optional) Additional URL parameters to be included
in the insertion request.
escape_params: boolean (optional) If true, the url_parameters will be
escaped before they are included in the request.
Returns:
On successful insert, an entry containing the contact created
On failure, a RequestError is raised of the form:
{'status': HTTP status code from server,
'reason': HTTP reason from the server,
'body': HTTP body of the server's response}
"""
insert_uri = insert_uri or self.GetFeedUri()
return self.Post(new_contact, insert_uri,
auth_token=auth_token, **kwargs)
CreateContact = create_contact
def add_contact(self, new_contact, insert_uri=None, auth_token=None,
billing_information=None, birthday=None, calendar_link=None, **kwargs):
"""Adds an new contact to Google Contacts.
Args:
new_contact: atom.Entry or subclass A new contact which is to be added to
Google Contacts.
insert_uri: the URL to post new contacts to the feed
url_params: dict (optional) Additional URL parameters to be included
in the insertion request.
escape_params: boolean (optional) If true, the url_parameters will be
escaped before they are included in the request.
Returns:
On successful insert, an entry containing the contact created
On failure, a RequestError is raised of the form:
{'status': HTTP status code from server,
'reason': HTTP reason from the server,
'body': HTTP body of the server's response}
"""
contact = gdata.contacts.data.ContactEntry()
if billing_information is not None:
if not isinstance(billing_information, gdata.contacts.data.BillingInformation):
billing_information = gdata.contacts.data.BillingInformation(text=billing_information)
contact.billing_information = billing_information
if birthday is not None:
if not isinstance(birthday, gdata.contacts.data.Birthday):
birthday = gdata.contacts.data.Birthday(when=birthday)
contact.birthday = birthday
if calendar_link is not None:
if type(calendar_link) is not ListType:
calendar_link = [calendar_link]
for link in calendar_link:
if not isinstance(link, gdata.contacts.data.CalendarLink):
if type(link) is not DictionaryType:
raise TypeError, "calendar_link Requires dictionary not %s" % type(link)
link = gdata.contacts.data.CalendarLink(
rel=link.get("rel", None),
label=link.get("label", None),
primary=link.get("primary", None),
href=link.get("href", None),
)
contact.calendar_link.append(link)
insert_uri = insert_uri or self.GetFeedUri()
return self.Post(contact, insert_uri,
auth_token=auth_token, **kwargs)
AddContact = add_contact
def get_contacts(self, desired_class=gdata.contacts.data.ContactsFeed,
auth_token=None, **kwargs):
"""Obtains a feed with the contacts belonging to the current user.
Args:
auth_token: An object which sets the Authorization HTTP header in its
modify_request method. Recommended classes include
gdata.gauth.ClientLoginToken and gdata.gauth.AuthSubToken
among others. Represents the current user. Defaults to None
and if None, this method will look for a value in the
auth_token member of SpreadsheetsClient.
desired_class: class descended from atom.core.XmlElement to which a
successful response should be converted. If there is no
converter function specified (desired_class=None) then the
desired_class will be used in calling the
atom.core.parse function. If neither
the desired_class nor the converter is specified, an
HTTP reponse object will be returned. Defaults to
gdata.spreadsheets.data.SpreadsheetsFeed.
"""
return self.get_feed(self.GetFeedUri(), auth_token=auth_token,
desired_class=desired_class, **kwargs)
GetContacts = get_contacts
def get_group(self, uri=None, desired_class=gdata.contacts.data.GroupEntry,
auth_token=None, **kwargs):
""" Get a single groups details
Args:
uri: the group uri or id
"""
return self.get(uri, desired_class=desired_class, auth_token=auth_token, **kwargs)
GetGroup = get_group
def get_groups(self, uri=None, desired_class=gdata.contacts.data.GroupsFeed,
auth_token=None, **kwargs):
uri = uri or self.GetFeedUri('groups')
return self.get_feed(uri, desired_class=desired_class, auth_token=auth_token, **kwargs)
GetGroups = get_groups
def create_group(self, new_group, insert_uri=None, url_params=None,
desired_class=None):
insert_uri = insert_uri or self.GetFeedUri('groups')
return self.Post(new_group, insert_uri, url_params=url_params,
desired_class=desired_class)
CreateGroup = create_group
def update_group(self, edit_uri, updated_group, url_params=None,
escape_params=True, desired_class=None):
return self.Put(updated_group, self._CleanUri(edit_uri),
url_params=url_params,
escape_params=escape_params,
desired_class=desired_class)
UpdateGroup = update_group
def delete_group(self, group_object, auth_token=None, force=False, **kws):
return self.Delete(group_object, auth_token=auth_token, force=force, **kws )
DeleteGroup = delete_group
def change_photo(self, media, contact_entry_or_url, content_type=None,
content_length=None):
"""Change the photo for the contact by uploading a new photo.
Performs a PUT against the photo edit URL to send the binary data for the
photo.
Args:
media: filename, file-like-object, or a gdata.MediaSource object to send.
contact_entry_or_url: ContactEntry or str If it is a ContactEntry, this
method will search for an edit photo link URL and
perform a PUT to the URL.
content_type: str (optional) the mime type for the photo data. This is
necessary if media is a file or file name, but if media
is a MediaSource object then the media object can contain
the mime type. If media_type is set, it will override the
mime type in the media object.
content_length: int or str (optional) Specifying the content length is
only required if media is a file-like object. If media
is a filename, the length is determined using
os.path.getsize. If media is a MediaSource object, it is
assumed that it already contains the content length.
"""
if isinstance(contact_entry_or_url, gdata.contacts.data.ContactEntry):
url = contact_entry_or_url.GetPhotoEditLink().href
else:
url = contact_entry_or_url
if isinstance(media, gdata.MediaSource):
payload = media
# If the media object is a file-like object, then use it as the file
# handle in the in the MediaSource.
elif hasattr(media, 'read'):
payload = gdata.MediaSource(file_handle=media,
content_type=content_type, content_length=content_length)
# Assume that the media object is a file name.
else:
payload = gdata.MediaSource(content_type=content_type,
content_length=content_length, file_path=media)
return self.Put(payload, url)
ChangePhoto = change_photo
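  # Illustrative sketch (assumed values, not part of the original client): the
  # media argument may be a filename, a file-like object, or a MediaSource:
  #
  #     client.ChangePhoto('me.jpg', contact_entry, content_type='image/jpeg')
  #     client.ChangePhoto(open('me.jpg'), contact_entry,
  #                        content_type='image/jpeg', content_length=5544)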
def get_photo(self, contact_entry_or_url):
"""Retrives the binary data for the contact's profile photo as a string.
Args:
      contact_entry_or_url: a gdata.contacts.ContactEntry object or a string
containing the photo link's URL. If the contact entry does not
contain a photo link, the image will not be fetched and this method
will return None.
"""
# TODO: add the ability to write out the binary image data to a file,
# reading and writing a chunk at a time to avoid potentially using up
# large amounts of memory.
url = None
if isinstance(contact_entry_or_url, gdata.contacts.data.ContactEntry):
photo_link = contact_entry_or_url.GetPhotoLink()
if photo_link:
url = photo_link.href
else:
url = contact_entry_or_url
if url:
return self.Get(url).read()
else:
return None
GetPhoto = get_photo
def delete_photo(self, contact_entry_or_url):
url = None
if isinstance(contact_entry_or_url, gdata.contacts.data.ContactEntry):
url = contact_entry_or_url.GetPhotoEditLink().href
else:
url = contact_entry_or_url
if url:
self.Delete(url)
DeletePhoto = delete_photo
def get_profiles_feed(self, uri=None):
"""Retrieves a feed containing all domain's profiles.
Args:
uri: string (optional) the URL to retrieve the profiles feed,
for example /m8/feeds/profiles/default/full
Returns:
On success, a ProfilesFeed containing the profiles.
On failure, raises a RequestError.
"""
uri = uri or self.GetFeedUri('profiles')
return self.Get(uri,
desired_class=gdata.contacts.data.ProfilesFeed)
GetProfilesFeed = get_profiles_feed
def get_profile(self, uri):
"""Retrieves a domain's profile for the user.
Args:
uri: string the URL to retrieve the profiles feed,
for example /m8/feeds/profiles/default/full/username
Returns:
On success, a ProfileEntry containing the profile for the user.
On failure, raises a RequestError
"""
return self.Get(uri,
desired_class=gdata.contacts.data.ProfileEntry)
GetProfile = get_profile
def update_profile(self, updated_profile, auth_token=None, force=False, **kwargs):
"""Updates an existing profile.
Args:
updated_profile: atom.Entry or subclass containing
the Atom Entry which will replace the profile which is
stored at the edit_url.
auth_token: An object which sets the Authorization HTTP header in its
modify_request method. Recommended classes include
gdata.gauth.ClientLoginToken and gdata.gauth.AuthSubToken
among others. Represents the current user. Defaults to None
and if None, this method will look for a value in the
auth_token member of ContactsClient.
force: boolean stating whether an update should be forced. Defaults to
False. Normally, if a change has been made since the passed in
entry was obtained, the server will not overwrite the entry since
the changes were based on an obsolete version of the entry.
Setting force to True will cause the update to silently
overwrite whatever version is present.
url_params: dict (optional) Additional URL parameters to be included
in the insertion request.
escape_params: boolean (optional) If true, the url_parameters will be
escaped before they are included in the request.
Returns:
On successful update, a httplib.HTTPResponse containing the server's
response to the PUT request.
On failure, raises a RequestError.
"""
return self.Update(updated_profile, auth_token=auth_token, force=force, **kwargs)
UpdateProfile = update_profile
def execute_batch(self, batch_feed, url, desired_class=None):
"""Sends a batch request feed to the server.
Args:
batch_feed: gdata.contacts.ContactFeed A feed containing batch
request entries. Each entry contains the operation to be performed
on the data contained in the entry. For example an entry with an
operation type of insert will be used as if the individual entry
had been inserted.
url: str The batch URL to which these operations should be applied.
converter: Function (optional) The function used to convert the server's
response to an object.
Returns:
The results of the batch request's execution on the server. If the
default converter is used, this is stored in a ContactsFeed.
"""
return self.Post(batch_feed, url, desired_class=desired_class)
ExecuteBatch = execute_batch
def execute_batch_profiles(self, batch_feed, url,
desired_class=gdata.contacts.data.ProfilesFeed):
"""Sends a batch request feed to the server.
Args:
batch_feed: gdata.profiles.ProfilesFeed A feed containing batch
request entries. Each entry contains the operation to be performed
on the data contained in the entry. For example an entry with an
operation type of insert will be used as if the individual entry
had been inserted.
url: string The batch URL to which these operations should be applied.
converter: Function (optional) The function used to convert the server's
response to an object. The default value is
gdata.profiles.ProfilesFeedFromString.
Returns:
The results of the batch request's execution on the server. If the
default converter is used, this is stored in a ProfilesFeed.
"""
return self.Post(batch_feed, url, desired_class=desired_class)
ExecuteBatchProfiles = execute_batch_profiles
def _CleanUri(self, uri):
"""Sanitizes a feed URI.
Args:
uri: The URI to sanitize, can be relative or absolute.
Returns:
The given URI without its http://server prefix, if any.
Keeps the leading slash of the URI.
"""
url_prefix = 'http://%s' % self.server
if uri.startswith(url_prefix):
uri = uri[len(url_prefix):]
return uri
class ContactsQuery(gdata.client.Query):
"""
Create a custom Contacts Query
Full specs can be found at: U{Contacts query parameters reference
<http://code.google.com/apis/contacts/docs/3.0/reference.html#Parameters>}
"""
def __init__(self, feed=None, group=None, orderby=None, showdeleted=None,
sortorder=None, requirealldeleted=None, **kwargs):
"""
@param max_results: The maximum number of entries to return. If you want
to receive all of the contacts, rather than only the default maximum, you
can specify a very large number for max-results.
@param start-index: The 1-based index of the first result to be retrieved.
@param updated-min: The lower bound on entry update dates.
@param group: Constrains the results to only the contacts belonging to the
group specified. Value of this parameter specifies group ID
@param orderby: Sorting criterion. The only supported value is
lastmodified.
@param showdeleted: Include deleted contacts in the returned contacts feed
    @param sortorder: Sorting order direction. Can be either ascending or
descending.
@param requirealldeleted: Only relevant if showdeleted and updated-min
are also provided. It dictates the behavior of the server in case it
detects that placeholders of some entries deleted since the point in
time specified as updated-min may have been lost.
"""
gdata.client.Query.__init__(self, **kwargs)
self.group = group
self.orderby = orderby
self.sortorder = sortorder
self.showdeleted = showdeleted
def modify_request(self, http_request):
if self.group:
gdata.client._add_query_param('group', self.group, http_request)
if self.orderby:
gdata.client._add_query_param('orderby', self.orderby, http_request)
if self.sortorder:
gdata.client._add_query_param('sortorder', self.sortorder, http_request)
if self.showdeleted:
gdata.client._add_query_param('showdeleted', self.showdeleted, http_request)
gdata.client.Query.modify_request(self, http_request)
ModifyRequest = modify_request
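# Illustrative usage sketch (assumed calls, not part of the original module):
#
#     query = ContactsQuery()
#     query.max_results = 100
#     query.group = 'http://www.google.com/m8/feeds/groups/default/base/6'
#     feed = client.GetContacts(q=query)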
class ProfilesQuery(gdata.client.Query):
def __init__(self, feed=None):
self.feed = feed or 'http://www.google.com/m8/feeds/profiles/default/full'
| bsd-3-clause | -6,521,328,686,490,687,000 | 39.4 | 95 | 0.653915 | false |
sjl/flask-urls | setup.py | 1 | 1159 | """
flask-urls
----------
A collection of URL-related functions for Flask applications.
Links
`````
* `documentation <http://sjl.bitbucket.org/flask-urls/>`_
* `development version
<http://bitbucket.org/sjl/flask-urls/get/tip.gz#egg=flask-urls-dev`_
"""
from setuptools import setup
setup(
name='flask-urls',
version='0.9.2',
url='http://sjl.bitbucket.org/flask-urls/',
license='MIT',
author='Steve Losh',
author_email='[email protected]',
description='A collection of URL-related functions for Flask applications.',
long_description=__doc__,
packages=['flaskext'],
namespace_packages=['flaskext'],
zip_safe=False,
platforms='any',
install_requires=[
'Flask'
],
classifiers=[
'Development Status :: 4 - Beta',
'Environment :: Web Environment',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Topic :: Internet :: WWW/HTTP :: Dynamic Content',
'Topic :: Software Development :: Libraries :: Python Modules'
]
)
| mit | 3,894,851,580,772,052,000 | 24.755556 | 80 | 0.624676 | false |
thedixieflatline/mGameController | pygame/tests/transform_test.py | 23 | 24894 | if __name__ == '__main__':
import sys
import os
pkg_dir = os.path.split(os.path.abspath(__file__))[0]
parent_dir, pkg_name = os.path.split(pkg_dir)
is_pygame_pkg = (pkg_name == 'tests' and
os.path.split(parent_dir)[1] == 'pygame')
if not is_pygame_pkg:
sys.path.insert(0, parent_dir)
else:
is_pygame_pkg = __name__.startswith('pygame.tests.')
if is_pygame_pkg:
from pygame.tests import test_utils
from pygame.tests.test_utils import test_not_implemented, unittest
else:
from test import test_utils
from test.test_utils import test_not_implemented, unittest
import pygame
import pygame.transform
from pygame.locals import *
import platform
def show_image(s, images = []):
#pygame.display.init()
size = s.get_rect()[2:]
screen = pygame.display.set_mode(size)
screen.blit(s, (0,0))
pygame.display.flip()
pygame.event.pump()
going = True
idx = 0
while going:
events = pygame.event.get()
for e in events:
if e.type == QUIT:
going = False
if e.type == KEYDOWN:
if e.key in [K_s, K_a]:
if e.key == K_s: idx += 1
if e.key == K_a: idx -= 1
s = images[idx]
screen.blit(s, (0,0))
pygame.display.flip()
pygame.event.pump()
else:
going = False
pygame.display.quit()
pygame.display.init()
def threshold(return_surf, surf, color, threshold = (0,0,0), diff_color = (0,0,0), change_return = True ):
""" given the color it makes return_surf only have areas with the given colour.
"""
width, height =surf.get_width(), surf.get_height()
if change_return:
return_surf.fill(diff_color)
try:
r, g, b = color
except ValueError:
r, g, b, a = color
try:
        tr, tg, tb = threshold
    except ValueError:
        tr, tg, tb, ta = threshold
similar = 0
for y in xrange(height):
for x in xrange(width):
c1 = surf.get_at((x,y))
if ( (abs(c1[0] - r) < tr) &
(abs(c1[1] - g) < tg) &
(abs(c1[2] - b) < tb) ):
# this pixel is within the threshold.
if change_return:
return_surf.set_at((x,y), c1)
similar += 1
#else:
# print c1, c2
return similar
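# For example, with color=(40, 40, 40) and threshold=(11, 11, 11), a pixel of
# value (45, 35, 40) counts as "similar" and is copied into return_surf.
# (Illustrative values only; the tests below exercise the real
# pygame.transform.threshold implementation.)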
class TransformModuleTest( unittest.TestCase ):
#class TransformModuleTest( object ):
#def assertEqual(self, x,x2):
# print x,x2
def test_scale__alpha( self ):
""" see if set_alpha information is kept.
"""
s = pygame.Surface((32,32))
s.set_alpha(55)
self.assertEqual(s.get_alpha(),55)
s = pygame.Surface((32,32))
s.set_alpha(55)
s2 = pygame.transform.scale(s, (64,64))
s3 = s.copy()
self.assertEqual(s.get_alpha(),s3.get_alpha())
self.assertEqual(s.get_alpha(),s2.get_alpha())
def test_scale__destination( self ):
""" see if the destination surface can be passed in to use.
"""
s = pygame.Surface((32,32))
s2 = pygame.transform.scale(s, (64,64))
s3 = s2.copy()
s3 = pygame.transform.scale(s, (64,64), s3)
pygame.transform.scale(s, (64,64), s2)
        # the wrong size surface is passed in.  Should raise an error.
self.assertRaises(ValueError, pygame.transform.scale, s, (33,64), s3)
if 1:
s = pygame.Surface((32,32))
s2 = pygame.transform.smoothscale(s, (64,64))
s3 = s2.copy()
s3 = pygame.transform.smoothscale(s, (64,64), s3)
pygame.transform.smoothscale(s, (64,64), s2)
            # the wrong size surface is passed in.  Should raise an error.
self.assertRaises(ValueError, pygame.transform.smoothscale, s, (33,64), s3)
def test_threshold__honors_third_surface(self):
# __doc__ for threshold as of Tue 07/15/2008
# pygame.transform.threshold(DestSurface, Surface, color, threshold =
# (0,0,0,0), diff_color = (0,0,0,0), change_return = True, Surface =
# None): return num_threshold_pixels
# When given the optional third
# surface, it would use the colors in that rather than the "color"
# specified in the function to check against.
# New in pygame 1.8
################################################################
# Sizes
(w, h) = size = (32, 32)
# the original_color is within the threshold of the threshold_color
threshold = (20, 20, 20, 20)
original_color = (25,25,25,25)
threshold_color = (10, 10, 10, 10)
# Surfaces
original_surface = pygame.Surface(size, pygame.SRCALPHA, 32)
dest_surface = pygame.Surface(size, pygame.SRCALPHA, 32)
# Third surface is used in lieu of 3rd position arg color
third_surface = pygame.Surface(size, pygame.SRCALPHA, 32)
# Color filling
original_surface.fill(original_color)
third_surface.fill(threshold_color)
################################################################
# All pixels for color should be within threshold
#
pixels_within_threshold = pygame.transform.threshold (
dest_surface, original_surface, threshold_color,
threshold,
0, # diff_color
0 # change_return
)
self.assertEqual(w*h, pixels_within_threshold)
################################################################
# This should respect third_surface colors in place of 3rd arg
# color Should be the same as: surface.fill(threshold_color)
# all within threshold
pixels_within_threshold = pygame.transform.threshold (
dest_surface,
original_surface,
0, # color (would fail if honored)
threshold,
0, # diff_color
0, # change_return
third_surface,
)
self.assertEqual(w*h, pixels_within_threshold)
################################################################
# Change dest_surface on return (not expected)
change_color = (255, 10, 10, 10)
pixels_within_threshold = pygame.transform.threshold (
dest_surface,
original_surface,
0, # color
threshold,
change_color, # diff_color
1, # change_return
third_surface,
)
# Return, of pixels within threshold is correct
self.assertEqual(w*h, pixels_within_threshold)
# Size of dest surface is correct
dest_rect = dest_surface.get_rect()
dest_size = dest_rect.size
self.assertEqual(size, dest_size)
# The color is not the change_color specified for every pixel As all
# pixels are within threshold
for pt in test_utils.rect_area_pts(dest_rect):
self.assert_(dest_surface.get_at(pt) != change_color)
################################################################
# Lowering the threshold, expecting changed surface
pixels_within_threshold = pygame.transform.threshold (
dest_surface,
original_surface,
0, # color
0, # threshold
change_color, # diff_color
1, # change_return
third_surface,
)
# Return, of pixels within threshold is correct
self.assertEqual(0, pixels_within_threshold)
# Size of dest surface is correct
dest_rect = dest_surface.get_rect()
dest_size = dest_rect.size
self.assertEqual(size, dest_size)
# The color is the change_color specified for every pixel As all
# pixels are not within threshold
for pt in test_utils.rect_area_pts(dest_rect):
self.assertEqual(dest_surface.get_at(pt), change_color)
#XXX
def test_threshold_non_src_alpha(self):
result = pygame.Surface((10,10))
s1 = pygame.Surface((10,10))
s2 = pygame.Surface((10,10))
s3 = pygame.Surface((10,10))
s4 = pygame.Surface((10,10))
result = pygame.Surface((10,10))
x = s1.fill((0,0,0))
x = s2.fill((0,20,0))
x = s3.fill((0,0,0))
x = s4.fill((0,0,0))
s1.set_at((0,0), (32, 20, 0 ))
s2.set_at((0,0), (33, 21, 0 ))
s2.set_at((3,0), (63, 61, 0 ))
s3.set_at((0,0), (112, 31, 0 ))
s4.set_at((0,0), (11, 31, 0 ))
s4.set_at((1,1), (12, 31, 0 ))
self.assertEqual( s1.get_at((0,0)), (32, 20, 0, 255) )
self.assertEqual( s2.get_at((0,0)), (33, 21, 0, 255) )
self.assertEqual( (0,0), (s1.get_flags(), s2.get_flags()))
#All one hundred of the pixels should be within the threshold.
#>>> object_tracking.diff_image(result, s1, s2, threshold = 20)
#100
similar_color = (255, 255, 255,255)
diff_color=(222,0,0,255)
threshold_color = (20,20,20,255)
rr = pygame.transform.threshold(result, s1, similar_color, threshold_color, diff_color, 1, s2)
self.assertEqual(rr, 99)
self.assertEqual( result.get_at((0,0)), (255,255,255, 255) )
rr = pygame.transform.threshold(result, s1, similar_color,
threshold_color, diff_color, 2, s2)
self.assertEqual(rr, 99)
self.assertEqual( result.get_at((0,0)), (32, 20, 0, 255) )
# this is within the threshold,
# so the color is copied from the s1 surface.
self.assertEqual( result.get_at((1,0)), (0, 0, 0, 255) )
# this color was not in the threshold so it has been set to diff_color
self.assertEqual( result.get_at((3,0)), (222, 0, 0, 255) )
def test_threshold__uneven_colors(self):
(w,h) = size = (16, 16)
original_surface = pygame.Surface(size, pygame.SRCALPHA, 32)
dest_surface = pygame.Surface(size, pygame.SRCALPHA, 32)
original_surface.fill(0)
threshold_color_template = [5, 5, 5, 5]
threshold_template = [6, 6, 6, 6]
################################################################
for pos in range(len('rgb')):
threshold_color = threshold_color_template[:]
threshold = threshold_template[:]
threshold_color[pos] = 45
threshold[pos] = 50
pixels_within_threshold = pygame.transform.threshold (
dest_surface, original_surface, threshold_color,
threshold,
0, # diff_color
0 # change_return
)
self.assertEqual(w*h, pixels_within_threshold)
################################################################
def test_threshold__surface(self):
"""
"""
#pygame.transform.threshold(DestSurface, Surface, color, threshold = (0,0,0,0), diff_color = (0,0,0,0), change_return = True): return num_threshold_pixels
threshold = pygame.transform.threshold
s1 = pygame.Surface((32,32), SRCALPHA, 32)
s2 = pygame.Surface((32,32), SRCALPHA, 32)
s3 = pygame.Surface((1,1), SRCALPHA, 32)
s1.fill((40,40,40))
s2.fill((255,255,255))
dest_surface = s2
surface1 = s1
color = (30,30,30)
the_threshold = (11,11,11)
diff_color = (255,0,0)
change_return = 2
# set the similar pixels in destination surface to the color
# in the first surface.
num_threshold_pixels = threshold(dest_surface, surface1, color,
the_threshold, diff_color,
change_return)
#num_threshold_pixels = threshold(s2, s1, (30,30,30))
self.assertEqual(num_threshold_pixels, s1.get_height() * s1.get_width())
self.assertEqual(s2.get_at((0,0)), (40, 40, 40, 255))
if 1:
# only one pixel should not be changed.
s1.fill((40,40,40))
s2.fill((255,255,255))
s1.set_at( (0,0), (170, 170, 170) )
# set the similar pixels in destination surface to the color
# in the first surface.
num_threshold_pixels = threshold(s2, s1, (30,30,30), (11,11,11),
(0,0,0), 2)
#num_threshold_pixels = threshold(s2, s1, (30,30,30))
self.assertEqual(num_threshold_pixels, (s1.get_height() * s1.get_width()) -1)
self.assertEqual(s2.get_at((0,0)), (0,0,0, 255))
self.assertEqual(s2.get_at((0,1)), (40, 40, 40, 255))
self.assertEqual(s2.get_at((17,1)), (40, 40, 40, 255))
# abs(40 - 255) < 100
#(abs(c1[0] - r) < tr)
if 1:
s1.fill((160,160,160))
s2.fill((255,255,255))
num_threshold_pixels = threshold(s2, s1, (255,255,255), (100,100,100), (0,0,0), True)
self.assertEqual(num_threshold_pixels, (s1.get_height() * s1.get_width()))
if 1:
# only one pixel should not be changed.
s1.fill((40,40,40))
s2.fill((255,255,255))
s1.set_at( (0,0), (170, 170, 170) )
num_threshold_pixels = threshold(s3, s1, (30,30,30), (11,11,11), (0,0,0), False)
#num_threshold_pixels = threshold(s2, s1, (30,30,30))
self.assertEqual(num_threshold_pixels, (s1.get_height() * s1.get_width()) -1)
if 1:
# test end markers. 0, and 255
# the pixels are different by 1.
s1.fill((254,254,254))
s2.fill((255,255,255))
s3.fill((255,255,255))
s1.set_at( (0,0), (170, 170, 170) )
num_threshold_pixels = threshold(s3, s1, (254,254,254), (1,1,1),
(44,44,44,255), False)
self.assertEqual(num_threshold_pixels, (s1.get_height() * s1.get_width()) -1)
# compare the two surfaces. Should be all but one matching.
num_threshold_pixels = threshold(s3, s1, 0, (1,1,1),
(44,44,44,255), False, s2)
self.assertEqual(num_threshold_pixels, (s1.get_height() * s1.get_width()) -1)
# within (0,0,0) threshold? Should match no pixels.
num_threshold_pixels = threshold(s3, s1, (253,253,253), (0,0,0),
(44,44,44,255), False)
self.assertEqual(num_threshold_pixels, 0)
# other surface within (0,0,0) threshold? Should match no pixels.
num_threshold_pixels = threshold(s3, s1, 0, (0,0,0),
(44,44,44,255), False, s2)
self.assertEqual(num_threshold_pixels, 0)
def test_laplacian(self):
"""
"""
SIZE = 32
s1 = pygame.Surface((SIZE, SIZE))
s2 = pygame.Surface((SIZE, SIZE))
s1.fill((10,10,70))
pygame.draw.line(s1, (255,0,0), (3,10), (20,20))
# a line at the last row of the image.
pygame.draw.line(s1, (255,0,0), (0,31), (31,31))
pygame.transform.laplacian(s1,s2)
#show_image(s1)
#show_image(s2)
self.assertEqual(s2.get_at((0,0)), (0, 0, 0, 255))
self.assertEqual(s2.get_at((3,10)), (255,0,0,255))
self.assertEqual(s2.get_at((0,31)), (255,0,0,255))
self.assertEqual(s2.get_at((31,31)), (255,0,0,255))
# here we create the return surface.
s2 = pygame.transform.laplacian(s1)
self.assertEqual(s2.get_at((0,0)), (0, 0, 0, 255))
self.assertEqual(s2.get_at((3,10)), (255,0,0,255))
self.assertEqual(s2.get_at((0,31)), (255,0,0,255))
self.assertEqual(s2.get_at((31,31)), (255,0,0,255))
def test_average_surfaces(self):
"""
"""
SIZE = 32
s1 = pygame.Surface((SIZE, SIZE))
s2 = pygame.Surface((SIZE, SIZE))
s3 = pygame.Surface((SIZE, SIZE))
s1.fill((10,10,70))
s2.fill((10,20,70))
s3.fill((10,130,10))
surfaces = [s1, s2, s3]
surfaces = [s1, s2]
sr = pygame.transform.average_surfaces(surfaces)
self.assertEqual(sr.get_at((0,0)), (10,15,70,255))
self.assertRaises(TypeError, pygame.transform.average_surfaces, 1)
self.assertRaises(TypeError, pygame.transform.average_surfaces, [])
self.assertRaises(TypeError, pygame.transform.average_surfaces, [1])
self.assertRaises(TypeError, pygame.transform.average_surfaces, [s1, 1])
self.assertRaises(TypeError, pygame.transform.average_surfaces, [1, s1])
self.assertRaises(TypeError, pygame.transform.average_surfaces, [s1, s2, 1])
self.assertRaises(TypeError, pygame.transform.average_surfaces, (s for s in [s1, s2,s3] ))
def test_average_surfaces__24(self):
SIZE = 32
depth = 24
s1 = pygame.Surface((SIZE, SIZE), 0, depth)
s2 = pygame.Surface((SIZE, SIZE), 0, depth)
s3 = pygame.Surface((SIZE, SIZE), 0, depth)
s1.fill((10,10,70, 255))
s2.fill((10,20,70, 255))
s3.fill((10,130,10, 255))
surfaces = [s1, s2, s3]
sr = pygame.transform.average_surfaces(surfaces)
self.assertEqual( sr.get_masks(), s1.get_masks() )
self.assertEqual( sr.get_flags(), s1.get_flags() )
self.assertEqual( sr.get_losses(), s1.get_losses() )
if 0:
print ( sr, s1 )
print ( sr.get_masks(), s1.get_masks() )
print ( sr.get_flags(), s1.get_flags() )
print ( sr.get_losses(), s1.get_losses() )
print ( sr.get_shifts(), s1.get_shifts() )
self.assertEqual(sr.get_at((0,0)), (10,53,50,255))
def test_average_color(self):
"""
"""
a = [24, 32]
for i in a:
s = pygame.Surface((32,32), 0, i)
s.fill((0,100,200))
s.fill((10,50,100), (0,0,16,32))
self.assertEqual(pygame.transform.average_color(s),(5,75,150,0))
self.assertEqual(pygame.transform.average_color(s, (16,0,16,32)), (0,100,200,0))
def todo_test_rotate(self):
# __doc__ (as of 2008-06-25) for pygame.transform.rotate:
# pygame.transform.rotate(Surface, angle): return Surface
# rotate an image
# color = (128, 128, 128, 255)
# s = pygame.Surface((3, 3))
# s.set_at((2, 0), color)
# self.assert_(s.get_at((0, 0)) != color)
# s = pygame.transform.rotate(s, 90)
# self.assert_(s.get_at((0, 0)) == color)
self.fail()
def test_rotate__lossless_at_90_degrees(self):
w, h = 32, 32
s = pygame.Surface((w, h), pygame.SRCALPHA)
gradient = list(test_utils.gradient(w, h))
for pt, color in gradient: s.set_at(pt, color)
for rotation in (90, -90):
s = pygame.transform.rotate(s,rotation)
for pt, color in gradient:
self.assert_(s.get_at(pt) == color)
def test_scale2x(self):
# __doc__ (as of 2008-06-25) for pygame.transform.scale2x:
# pygame.transform.scale2x(Surface, DestSurface = None): Surface
# specialized image doubler
w, h = 32, 32
s = pygame.Surface((w, h), pygame.SRCALPHA, 32)
# s.set_at((0,0), (20, 20, 20, 255))
s2 = pygame.transform.scale2x(s)
self.assertEquals(s2.get_rect().size, (64, 64))
def test_get_smoothscale_backend(self):
filter_type = pygame.transform.get_smoothscale_backend()
self.failUnless(filter_type in ['GENERIC', 'MMX', 'SSE'])
# It would be nice to test if a non-generic type corresponds to an x86
# processor. But there is no simple test for this. platform.machine()
        # returns machine-specific information, like 'i686'.
def test_set_smoothscale_backend(self):
# All machines should allow 'GENERIC'.
original_type = pygame.transform.get_smoothscale_backend()
pygame.transform.set_smoothscale_backend('GENERIC')
filter_type = pygame.transform.get_smoothscale_backend()
self.failUnlessEqual(filter_type, 'GENERIC')
# All machines should allow returning to original value.
# Also check that keyword argument works.
pygame.transform.set_smoothscale_backend(type=original_type)
# Something invalid.
def change():
pygame.transform.set_smoothscale_backend('mmx')
self.failUnlessRaises(ValueError, change)
# Invalid argument keyword.
def change():
pygame.transform.set_smoothscale_backend(t='GENERIC')
self.failUnlessRaises(TypeError, change)
# Invalid argument type.
def change():
pygame.transform.set_smoothscale_backend(1)
self.failUnlessRaises(TypeError, change)
# Unsupported type, if possible.
if original_type != 'SSE':
def change():
pygame.transform.set_smoothscale_backend('SSE')
self.failUnlessRaises(ValueError, change)
# Should be back where we started.
filter_type = pygame.transform.get_smoothscale_backend()
self.failUnlessEqual(filter_type, original_type)
def todo_test_chop(self):
# __doc__ (as of 2008-08-02) for pygame.transform.chop:
# pygame.transform.chop(Surface, rect): return Surface
# gets a copy of an image with an interior area removed
#
# Extracts a portion of an image. All vertical and horizontal pixels
# surrounding the given rectangle area are removed. The corner areas
# (diagonal to the rect) are then brought together. (The original
# image is not altered by this operation.)
#
# NOTE: If you want a "crop" that returns the part of an image within
# a rect, you can blit with a rect to a new surface or copy a
# subsurface.
self.fail()
def todo_test_flip(self):
# __doc__ (as of 2008-08-02) for pygame.transform.flip:
# pygame.transform.flip(Surface, xbool, ybool): return Surface
# flip vertically and horizontally
#
# This can flip a Surface either vertically, horizontally, or both.
# Flipping a Surface is nondestructive and returns a new Surface with
# the same dimensions.
self.fail()
def todo_test_rotozoom(self):
# __doc__ (as of 2008-08-02) for pygame.transform.rotozoom:
# pygame.transform.rotozoom(Surface, angle, scale): return Surface
# filtered scale and rotation
#
# This is a combined scale and rotation transform. The resulting
# Surface will be a filtered 32-bit Surface. The scale argument is a
# floating point value that will be multiplied by the current
# resolution. The angle argument is a floating point value that
# represents the counterclockwise degrees to rotate. A negative
# rotation angle will rotate clockwise.
self.fail()
def todo_test_smoothscale(self):
# __doc__ (as of 2008-08-02) for pygame.transform.smoothscale:
# pygame.transform.smoothscale(Surface, (width, height), DestSurface =
# None): return Surface
#
# scale a surface to an arbitrary size smoothly
#
# Uses one of two different algorithms for scaling each dimension of
# the input surface as required. For shrinkage, the output pixels are
# area averages of the colors they cover. For expansion, a bilinear
# filter is used. For the amd64 and i686 architectures, optimized MMX
# routines are included and will run much faster than other machine
# types. The size is a 2 number sequence for (width, height). This
# function only works for 24-bit or 32-bit surfaces. An exception
# will be thrown if the input surface bit depth is less than 24.
#
# New in pygame 1.8
self.fail()
if __name__ == '__main__':
#tt = TransformModuleTest()
#tt.test_threshold_non_src_alpha()
unittest.main()
| gpl-2.0 | -7,855,927,651,751,406,000 | 32.777476 | 162 | 0.540532 | false |
51reboot/actual_13_homework | 03/sxq/dict_file_build.py | 1 | 5067 | #!/usr/bin/env python
#coding:utf-8
print '-'*30 + 'Dict built-in methods'
#dict
my_dict = {'s':'1','x':'2','q':'3'}
new_dict = {}
#clear  empty the dictionary
# print my_dict
# my_dict.clear()
# print my_dict
# copy  shallow copy; the original dict is left unchanged
# new_dict=my_dict.copy()
# new_dict['a']=4
# print 'new_dict %s'%new_dict
# print my_dict
# fromkeys  build a new dict from the given keys, each mapped to the same value
# print new_dict.fromkeys(my_dict.keys(),10)
# get  return the value for the key, or None if the key does not exist
# print my_dict.get('z')
# has_key  return True if key k exists, otherwise False
# print my_dict.has_key('z')
# items  convert the dict into a list of (key, value) tuples
# print my_dict.items()
# iteritems  an iterator over the (key, value) pairs
# D.iteritems() -> an iterator over the (key, value) items of D
# print my_dict.iteritems()
# iterkeys
# D.iterkeys() -> an iterator over the keys of D
# itervalues
# D.itervalues() -> an iterator over the values of D
# keys  get the dict's keys
# print my_dict.keys()
# values  get the dict's values
# print my_dict.values()
# pop  remove the given key and return its value; raises an error if the key is missing
# print my_dict.pop('s')
# popitem  remove and return an arbitrary (key, value) pair; raises an error if the dict is empty
# print my_dict.popitem()
# setdefault  return the value if the key exists; otherwise insert the key with the default value
# D.setdefault(k[,d]) -> D.get(k,d), also set D[k]=d if k not in D
# print my_dict.setdefault('z',0)
# print my_dict
# update  merge another dictionary into this one
# dict2 = {'r':1,'o':2}
# my_dict.update(dict2)
# print my_dict
# viewitems  return a view of the dict's (key, value) pairs
# print my_dict.viewitems()
# viewkeys  return a view of the dict's keys
# print my_dict.viewkeys()
# viewvalues  return a view of the dict's values
# print my_dict.viewvalues()
print
print '-'*30 + 'file built-in methods'
print
# with open('log.txt') as f :
# close  close the file
# print help(file.close)
# closed  bool, True if the file is closed
# print help(file.closed)
# encoding  character encoding of the file
# errors  unicode error handling mode
# fileno  file descriptor number
# print f.fileno()
# flush  flush the in-memory buffer to disk
# isatty  whether the file is connected to a terminal
# print f.isatty()
# mode  the mode the file was opened with
# print f.mode
# name  the name of the file
# print f.name
# newlines  the kinds of line endings seen while reading the file
# print f.newlines
# next  return the next line
# print f.next()
# read  read the file by bytes
# readline  read the file line by line, one line at a time
# readlines  read all lines at once, returning a list
# seek  move the file pointer; returns nothing
# tell  report the current file pointer position
# softspace  flag used by print to decide whether to emit a space
# print f.read()
# truncate  cut the file down to n bytes from the start; without n, truncate at the current position; everything after that is removed
# print f.truncate(8)
# write  write a string to the file
# writelines  write a sequence of lines at once
# xreadlines ?
#
print '-'*30 + 'OS built-in methods'
import os
import time
filename = '/Users/xiaoqiang/PycharmProjects/actual_13_homework/03/sxq/log.txt'
# os.path
# abspath  get the absolute path
print os.path.abspath('log.txt')
# basename  return the final file name component of the path
print os.path.basename(filename)
# dirname  strip the file name and return the directory
print os.path.dirname(filename)
# commonprefix  return the longest path prefix shared by all paths in the list
print os.path.commonprefix(['/Users/xiaoqiang','/'])
# defpath  the default executable search path (e.g. /bin)
print os.path.defpath
# devnull  /dev/null
print os.path.devnull
# exists  whether the file or directory exists, returns bool
print os.path.exists(filename)
# expanduser
print os.path.expanduser(filename)
# expandvars
print os.path.expandvars(filename)
# getatime  return the last access time as a float in seconds
print os.path.getatime(filename)
# getctime  return the creation time
print os.path.getctime(filename)
# getmtime  return the last modification time
print os.path.getmtime(filename)
# getsize  return the file size in bytes
print os.path.getsize(filename)
# isabs  is it an absolute path
print os.path.isabs(filename)
# isdir  is it a directory
print os.path.isdir(filename)
# isfile  is it a file
print os.path.isfile(filename)
# islink  is it a symbolic link
print os.path.islink(filename)
# ismount  is it a mount point
print os.path.ismount(filename)
# join  join path components
print os.path.join('a','b','log.txt')
print '+'*30
# normcase
print os.path.normcase(filename)
# normpath
print os.path.normpath(filename)
print '+'*30
# realpath
print os.path.realpath(filename)
# relpath  return the file path relative to the current directory
print os.path.relpath(filename)
# samefile  whether two paths point to the same file
# sameopenfile  whether two open files refer to the same file
# samestat
# stat
print '+'*30
# split  return (directory, file name)
print os.path.split(filename)
# splitdrive
print os.path.splitdrive(filename)
# splitext  split off the file extension
print os.path.splitext(filename)
# supports_unicode_filenames
# sys
# walk
# warnings
# os
# pardir
# pathsep
# altsep
# extsep
# genericpath
# curdir
# lexists | mit | 6,341,058,686,912,388,000 | 17.946341 | 79 | 0.720319 | false |
adityacs/ansible | lib/ansible/plugins/action/unarchive.py | 28 | 4994 | # (c) 2012, Michael DeHaan <[email protected]>
# (c) 2013, Dylan Martin <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
from ansible.errors import AnsibleError
from ansible.module_utils._text import to_native
from ansible.module_utils.pycompat24 import get_exception
from ansible.plugins.action import ActionBase
from ansible.constants import mk_boolean as boolean
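# Illustrative playbook usage this action plugin supports (assumed example,
# not part of this file):
#
#   - unarchive:
#       src: files/app.tar.gz
#       dest: /opt/app
#       creates: /opt/app/installed.txt
#
# With remote_src=True (or the deprecated copy=False) the archive is expected
# to already be on the managed host and is not transferred first.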
class ActionModule(ActionBase):
TRANSFERS_FILES = True
def run(self, tmp=None, task_vars=None):
''' handler for unarchive operations '''
if task_vars is None:
task_vars = dict()
result = super(ActionModule, self).run(tmp, task_vars)
source = self._task.args.get('src', None)
dest = self._task.args.get('dest', None)
remote_src = boolean(self._task.args.get('remote_src', False))
creates = self._task.args.get('creates', None)
# "copy" is deprecated in favor of "remote_src".
if 'copy' in self._task.args:
# They are mutually exclusive.
if 'remote_src' in self._task.args:
result['failed'] = True
result['msg'] = "parameters are mutually exclusive: ('copy', 'remote_src')"
return result
# We will take the information from copy and store it in
# the remote_src var to use later in this file.
remote_src = not boolean(self._task.args.get('copy'))
if source is None or dest is None:
result['failed'] = True
result['msg'] = "src (or content) and dest are required"
return result
if not tmp:
tmp = self._make_tmp_path()
if creates:
            # do not run the unarchive when 'creates' is set and the target
            # file already exists.  This allows idempotence of repeated runs.
if self._remote_file_exists(creates):
result['skipped'] = True
result['msg'] = "skipped, since %s exists" % creates
self._remove_tmp_path(tmp)
return result
dest = self._remote_expand_user(dest) # CCTODO: Fix path for Windows hosts.
source = os.path.expanduser(source)
if not remote_src:
try:
source = self._loader.get_real_file(self._find_needle('files', source))
except AnsibleError:
result['failed'] = True
result['msg'] = to_native(get_exception())
self._remove_tmp_path(tmp)
return result
try:
remote_stat = self._execute_remote_stat(dest, all_vars=task_vars, follow=True)
except AnsibleError:
result['failed'] = True
result['msg'] = to_native(get_exception())
self._remove_tmp_path(tmp)
return result
if not remote_stat['exists'] or not remote_stat['isdir']:
result['failed'] = True
result['msg'] = "dest '%s' must be an existing dir" % dest
self._remove_tmp_path(tmp)
return result
if not remote_src:
# transfer the file to a remote tmp location
tmp_src = self._connection._shell.join_path(tmp, 'source')
self._transfer_file(source, tmp_src)
# handle diff mode client side
# handle check mode client side
if not remote_src:
# fix file permissions when the copy is done as a different user
self._fixup_perms2((tmp, tmp_src))
# Build temporary module_args.
new_module_args = self._task.args.copy()
new_module_args.update(
dict(
src=tmp_src,
original_basename=os.path.basename(source),
),
)
else:
new_module_args = self._task.args.copy()
new_module_args.update(
dict(
original_basename=os.path.basename(source),
),
)
# execute the unarchive module now, with the updated args
result.update(self._execute_module(module_args=new_module_args, task_vars=task_vars))
self._remove_tmp_path(tmp)
return result
| gpl-3.0 | -6,481,284,628,784,896,000 | 36.833333 | 93 | 0.594313 | false |
Timurdov/bionic | bionic/Lib/site-packages/django/db/migrations/autodetector.py | 9 | 52650 | from __future__ import unicode_literals
import re
import datetime
from itertools import chain
from django.utils import six
from django.conf import settings
from django.db import models
from django.db.migrations import operations
from django.db.migrations.migration import Migration
from django.db.migrations.questioner import MigrationQuestioner
from django.db.migrations.optimizer import MigrationOptimizer
from django.db.migrations.operations.models import AlterModelOptions
class MigrationAutodetector(object):
"""
Takes a pair of ProjectStates, and compares them to see what the
first would need doing to make it match the second (the second
usually being the project's current state).
Note that this naturally operates on entire projects at a time,
as it's likely that changes interact (for example, you can't
add a ForeignKey without having a migration to add the table it
depends on first). A user interface may offer single-app usage
if it wishes, with the caveat that it may not always be possible.
"""
def __init__(self, from_state, to_state, questioner=None):
self.from_state = from_state
self.to_state = to_state
self.questioner = questioner or MigrationQuestioner()
def changes(self, graph, trim_to_apps=None, convert_apps=None):
"""
        Main entry point to produce a list of applicable changes.
Takes a graph to base names on and an optional set of apps
to try and restrict to (restriction is not guaranteed)
"""
changes = self._detect_changes(convert_apps, graph)
changes = self.arrange_for_graph(changes, graph)
if trim_to_apps:
changes = self._trim_to_apps(changes, trim_to_apps)
return changes
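    # Illustrative sketch (assumptions, not part of this module): the
    # autodetector is typically driven with two project states and the
    # loader's graph, roughly:
    #
    #     autodetector = MigrationAutodetector(
    #         loader.project_state(),
    #         ProjectState.from_apps(apps),
    #         questioner,
    #     )
    #     changes = autodetector.changes(graph=loader.graph,
    #                                    trim_to_apps=app_labels or None)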
def deep_deconstruct(self, obj):
"""
Recursive deconstruction for a field and its arguments.
Used for full comparison for rename/alter; sometimes a single-level
deconstruction will not compare correctly.
"""
if not hasattr(obj, 'deconstruct') or isinstance(obj, type):
return obj
deconstructed = obj.deconstruct()
if isinstance(obj, models.Field):
# we have a field which also returns a name
deconstructed = deconstructed[1:]
path, args, kwargs = deconstructed
return (
path,
[self.deep_deconstruct(value) for value in args],
dict(
(key, self.deep_deconstruct(value))
for key, value in kwargs.items()
),
)
def only_relation_agnostic_fields(self, fields):
"""
Return a definition of the fields that ignores field names and
what related fields actually relate to.
Used for detecting renames (as, of course, the related fields
change during renames)
"""
fields_def = []
for name, field in fields:
deconstruction = self.deep_deconstruct(field)
if field.rel and field.rel.to:
del deconstruction[2]['to']
fields_def.append(deconstruction)
return fields_def
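    # For example, two ForeignKey fields that differ only in the model they
    # point to produce identical definitions here; stripping the 'to' argument
    # is what lets rename detection match fields across renamed models.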
def _detect_changes(self, convert_apps=None, graph=None):
"""
Returns a dict of migration plans which will achieve the
change from from_state to to_state. The dict has app labels
as keys and a list of migrations as values.
The resulting migrations aren't specially named, but the names
do matter for dependencies inside the set.
convert_apps is the list of apps to convert to use migrations
(i.e. to make initial migrations for, in the usual case)
graph is an optional argument that, if provided, can help improve
dependency generation and avoid potential circular dependencies.
"""
# The first phase is generating all the operations for each app
# and gathering them into a big per-app list.
# We'll then go through that list later and order it and split
# into migrations to resolve dependencies caused by M2Ms and FKs.
self.generated_operations = {}
# Prepare some old/new state and model lists, separating
# proxy models and ignoring unmigrated apps.
self.old_apps = self.from_state.render(ignore_swappable=True)
self.new_apps = self.to_state.render()
self.old_model_keys = []
self.old_proxy_keys = []
self.old_unmanaged_keys = []
self.new_model_keys = []
self.new_proxy_keys = []
self.new_unmanaged_keys = []
for al, mn in sorted(self.from_state.models.keys()):
model = self.old_apps.get_model(al, mn)
if not model._meta.managed:
self.old_unmanaged_keys.append((al, mn))
elif al not in self.from_state.real_apps:
if model._meta.proxy:
self.old_proxy_keys.append((al, mn))
else:
self.old_model_keys.append((al, mn))
for al, mn in sorted(self.to_state.models.keys()):
model = self.new_apps.get_model(al, mn)
if not model._meta.managed:
self.new_unmanaged_keys.append((al, mn))
elif (
al not in self.from_state.real_apps or
(convert_apps and al in convert_apps)
):
if model._meta.proxy:
self.new_proxy_keys.append((al, mn))
else:
self.new_model_keys.append((al, mn))
# Renames have to come first
self.generate_renamed_models()
# Prepare field lists, and prepare a list of the fields that used
# through models in the old state so we can make dependencies
# from the through model deletion to the field that uses it.
self.kept_model_keys = set(self.old_model_keys).intersection(self.new_model_keys)
self.kept_proxy_keys = set(self.old_proxy_keys).intersection(self.new_proxy_keys)
self.kept_unmanaged_keys = set(self.old_unmanaged_keys).intersection(self.new_unmanaged_keys)
self.through_users = {}
self.old_field_keys = set()
self.new_field_keys = set()
for app_label, model_name in sorted(self.kept_model_keys):
old_model_name = self.renamed_models.get((app_label, model_name), model_name)
old_model_state = self.from_state.models[app_label, old_model_name]
new_model_state = self.to_state.models[app_label, model_name]
self.old_field_keys.update((app_label, model_name, x) for x, y in old_model_state.fields)
self.new_field_keys.update((app_label, model_name, x) for x, y in new_model_state.fields)
# Through model map generation
for app_label, model_name in sorted(self.old_model_keys):
old_model_name = self.renamed_models.get((app_label, model_name), model_name)
old_model_state = self.from_state.models[app_label, old_model_name]
for field_name, field in old_model_state.fields:
old_field = self.old_apps.get_model(app_label, old_model_name)._meta.get_field_by_name(field_name)[0]
if hasattr(old_field, "rel") and getattr(old_field.rel, "through", None) and not old_field.rel.through._meta.auto_created:
through_key = (
old_field.rel.through._meta.app_label,
old_field.rel.through._meta.object_name.lower(),
)
self.through_users[through_key] = (app_label, old_model_name, field_name)
# Generate non-rename model operations
self.generate_deleted_models()
self.generate_created_models()
self.generate_deleted_proxies()
self.generate_created_proxies()
self.generate_altered_options()
# Generate field operations
self.generate_renamed_fields()
self.generate_removed_fields()
self.generate_added_fields()
self.generate_altered_fields()
self.generate_altered_unique_together()
self.generate_altered_index_together()
self.generate_altered_db_table()
self.generate_altered_order_with_respect_to()
# Now, reordering to make things possible. The order we have already
# isn't bad, but we need to pull a few things around so FKs work nicely
# inside the same app
for app_label, ops in sorted(self.generated_operations.items()):
for i in range(10000):
found = False
for i, op in enumerate(ops):
for dep in op._auto_deps:
if dep[0] == app_label:
# Alright, there's a dependency on the same app.
for j, op2 in enumerate(ops):
if self.check_dependency(op2, dep) and j > i:
ops = ops[:i] + ops[i + 1:j + 1] + [op] + ops[j + 1:]
found = True
break
if found:
break
if found:
break
if not found:
break
else:
raise ValueError("Infinite loop caught in operation dependency resolution")
self.generated_operations[app_label] = ops
# Now, we need to chop the lists of operations up into migrations with
# dependencies on each other.
# We do this by stepping up an app's list of operations until we
# find one that has an outgoing dependency that isn't in another app's
# migration yet (hasn't been chopped off its list). We then chop off the
# operations before it into a migration and move onto the next app.
# If we loop back around without doing anything, there's a circular
# dependency (which _should_ be impossible as the operations are all
# split at this point so they can't depend and be depended on)
self.migrations = {}
num_ops = sum(len(x) for x in self.generated_operations.values())
chop_mode = False
while num_ops:
# On every iteration, we step through all the apps and see if there
# is a completed set of operations.
# If we find that a subset of the operations are complete we can
# try to chop it off from the rest and continue, but we only
# do this if we've already been through the list once before
# without any chopping and nothing has changed.
for app_label in sorted(self.generated_operations.keys()):
chopped = []
dependencies = set()
for operation in list(self.generated_operations[app_label]):
deps_satisfied = True
operation_dependencies = set()
for dep in operation._auto_deps:
is_swappable_dep = False
if dep[0] == "__setting__":
# We need to temporarily resolve the swappable dependency to prevent
# circular references. While keeping the dependency checks on the
# resolved model we still add the swappable dependencies.
# See #23322
resolved_app_label, resolved_object_name = getattr(settings, dep[1]).split('.')
original_dep = dep
dep = (resolved_app_label, resolved_object_name.lower(), dep[2], dep[3])
is_swappable_dep = True
if dep[0] != app_label and dep[0] != "__setting__":
# External app dependency. See if it's not yet
# satisfied.
for other_operation in self.generated_operations.get(dep[0], []):
if self.check_dependency(other_operation, dep):
deps_satisfied = False
break
if not deps_satisfied:
break
else:
if is_swappable_dep:
operation_dependencies.add((original_dep[0], original_dep[1]))
elif dep[0] in self.migrations:
operation_dependencies.add((dep[0], self.migrations[dep[0]][-1].name))
else:
# If we can't find the other app, we add a first/last dependency,
# but only if we've already been through once and checked everything
if chop_mode:
# If the app already exists, we add a dependency on the last migration,
# as we don't know which migration contains the target field.
# If it's not yet migrated or has no migrations, we use __first__
if graph and graph.leaf_nodes(dep[0]):
operation_dependencies.add(graph.leaf_nodes(dep[0])[0])
else:
operation_dependencies.add((dep[0], "__first__"))
else:
deps_satisfied = False
if deps_satisfied:
chopped.append(operation)
dependencies.update(operation_dependencies)
self.generated_operations[app_label] = self.generated_operations[app_label][1:]
else:
break
# Make a migration! Well, only if there's stuff to put in it
if dependencies or chopped:
if not self.generated_operations[app_label] or chop_mode:
subclass = type(str("Migration"), (Migration,), {"operations": [], "dependencies": []})
instance = subclass("auto_%i" % (len(self.migrations.get(app_label, [])) + 1), app_label)
instance.dependencies = list(dependencies)
instance.operations = chopped
self.migrations.setdefault(app_label, []).append(instance)
chop_mode = False
else:
self.generated_operations[app_label] = chopped + self.generated_operations[app_label]
new_num_ops = sum(len(x) for x in self.generated_operations.values())
if new_num_ops == num_ops:
if not chop_mode:
chop_mode = True
else:
raise ValueError("Cannot resolve operation dependencies: %r" % self.generated_operations)
num_ops = new_num_ops
# OK, add in internal dependencies among the migrations
for app_label, migrations in self.migrations.items():
for m1, m2 in zip(migrations, migrations[1:]):
m2.dependencies.append((app_label, m1.name))
# De-dupe dependencies
for app_label, migrations in self.migrations.items():
for migration in migrations:
migration.dependencies = list(set(migration.dependencies))
# Optimize migrations
for app_label, migrations in self.migrations.items():
for migration in migrations:
migration.operations = MigrationOptimizer().optimize(migration.operations, app_label=app_label)
return self.migrations
def check_dependency(self, operation, dependency):
"""
Checks if an operation dependency matches an operation.
"""
# Created model
if dependency[2] is None and dependency[3] is True:
return (
isinstance(operation, operations.CreateModel) and
operation.name.lower() == dependency[1].lower()
)
# Created field
elif dependency[2] is not None and dependency[3] is True:
return (
(
isinstance(operation, operations.CreateModel) and
operation.name.lower() == dependency[1].lower() and
any(dependency[2] == x for x, y in operation.fields)
) or
(
isinstance(operation, operations.AddField) and
operation.model_name.lower() == dependency[1].lower() and
operation.name.lower() == dependency[2].lower()
)
)
# Removed field
elif dependency[2] is not None and dependency[3] is False:
return (
isinstance(operation, operations.RemoveField) and
operation.model_name.lower() == dependency[1].lower() and
operation.name.lower() == dependency[2].lower()
)
# Removed model
elif dependency[2] is None and dependency[3] is False:
return (
isinstance(operation, operations.DeleteModel) and
operation.name.lower() == dependency[1].lower()
)
# Field being altered
elif dependency[2] is not None and dependency[3] == "alter":
return (
isinstance(operation, operations.AlterField) and
operation.model_name.lower() == dependency[1].lower() and
operation.name.lower() == dependency[2].lower()
)
# order_with_respect_to being unset for a field
elif dependency[2] is not None and dependency[3] == "order_wrt_unset":
return (
isinstance(operation, operations.AlterOrderWithRespectTo) and
operation.name.lower() == dependency[1].lower() and
(operation.order_with_respect_to or "").lower() != dependency[2].lower()
)
# Unknown dependency. Raise an error.
else:
raise ValueError("Can't handle dependency %r" % (dependency, ))
def add_operation(self, app_label, operation, dependencies=None, beginning=False):
# Dependencies are (app_label, model_name, field_name, create/delete as True/False)
operation._auto_deps = dependencies or []
if beginning:
self.generated_operations.setdefault(app_label, []).insert(0, operation)
else:
self.generated_operations.setdefault(app_label, []).append(operation)
def swappable_first_key(self, item):
"""
Sorting key function that places potential swappable models first in
lists of created models (only real way to solve #22783)
"""
try:
model = self.new_apps.get_model(item[0], item[1])
base_names = [base.__name__ for base in model.__bases__]
string_version = "%s.%s" % (item[0], item[1])
if (
model._meta.swappable or
"AbstractUser" in base_names or
"AbstractBaseUser" in base_names or
settings.AUTH_USER_MODEL.lower() == string_version.lower()
):
return ("___" + item[0], "___" + item[1])
except LookupError:
pass
return item
def generate_renamed_models(self):
"""
Finds any renamed models, and generates the operations for them,
and removes the old entry from the model lists.
Must be run before other model-level generation.
"""
self.renamed_models = {}
self.renamed_models_rel = {}
added_models = set(self.new_model_keys) - set(self.old_model_keys)
for app_label, model_name in sorted(added_models):
model_state = self.to_state.models[app_label, model_name]
model_fields_def = self.only_relation_agnostic_fields(model_state.fields)
removed_models = set(self.old_model_keys) - set(self.new_model_keys)
for rem_app_label, rem_model_name in removed_models:
if rem_app_label == app_label:
rem_model_state = self.from_state.models[rem_app_label, rem_model_name]
rem_model_fields_def = self.only_relation_agnostic_fields(rem_model_state.fields)
if model_fields_def == rem_model_fields_def:
if self.questioner.ask_rename_model(rem_model_state, model_state):
self.add_operation(
app_label,
operations.RenameModel(
old_name=rem_model_state.name,
new_name=model_state.name,
)
)
self.renamed_models[app_label, model_name] = rem_model_name
self.renamed_models_rel['%s.%s' % (rem_model_state.app_label, rem_model_state.name)] = '%s.%s' % (model_state.app_label, model_state.name)
self.old_model_keys.remove((rem_app_label, rem_model_name))
self.old_model_keys.append((app_label, model_name))
break
def generate_created_models(self):
"""
Find all new models (both managed and unmanaged) and make create
operations for them as well as separate operations to create any
foreign key or M2M relationships (we'll optimize these back in later
if we can).
We also defer any model options that refer to collections of fields
that might be deferred (e.g. unique_together, index_together).
"""
added_models = set(self.new_model_keys) - set(self.old_model_keys)
added_unmanaged_models = set(self.new_unmanaged_keys) - set(self.old_unmanaged_keys)
models = chain(
sorted(added_models, key=self.swappable_first_key, reverse=True),
sorted(added_unmanaged_models, key=self.swappable_first_key, reverse=True)
)
for app_label, model_name in models:
model_state = self.to_state.models[app_label, model_name]
model_opts = self.new_apps.get_model(app_label, model_name)._meta
# Gather related fields
related_fields = {}
primary_key_rel = None
for field in model_opts.local_fields:
if field.rel:
if field.rel.to:
if field.primary_key:
primary_key_rel = field.rel.to
else:
related_fields[field.name] = field
# through will be none on M2Ms on swapped-out models;
# we can treat lack of through as auto_created=True, though.
if getattr(field.rel, "through", None) and not field.rel.through._meta.auto_created:
related_fields[field.name] = field
for field in model_opts.local_many_to_many:
if field.rel.to:
related_fields[field.name] = field
if getattr(field.rel, "through", None) and not field.rel.through._meta.auto_created:
related_fields[field.name] = field
# Are there unique/index_together to defer?
unique_together = model_state.options.pop('unique_together', None)
index_together = model_state.options.pop('index_together', None)
order_with_respect_to = model_state.options.pop('order_with_respect_to', None)
# Depend on the deletion of any possible proxy version of us
dependencies = [
(app_label, model_name, None, False),
]
# Depend on all bases
for base in model_state.bases:
if isinstance(base, six.string_types) and "." in base:
base_app_label, base_name = base.split(".", 1)
dependencies.append((base_app_label, base_name, None, True))
# Depend on the other end of the primary key if it's a relation
if primary_key_rel:
dependencies.append((
primary_key_rel._meta.app_label,
primary_key_rel._meta.object_name,
None,
True
))
# Generate creation operation
self.add_operation(
app_label,
operations.CreateModel(
name=model_state.name,
fields=[d for d in model_state.fields if d[0] not in related_fields],
options=model_state.options,
bases=model_state.bases,
),
dependencies=dependencies,
beginning=True,
)
# Don't add operations which modify the database for unmanaged models
if not model_opts.managed:
continue
# Generate operations for each related field
for name, field in sorted(related_fields.items()):
# Account for FKs to swappable models
swappable_setting = getattr(field, 'swappable_setting', None)
if swappable_setting is not None:
dep_app_label = "__setting__"
dep_object_name = swappable_setting
else:
dep_app_label = field.rel.to._meta.app_label
dep_object_name = field.rel.to._meta.object_name
dependencies = [(dep_app_label, dep_object_name, None, True)]
if getattr(field.rel, "through", None) and not field.rel.through._meta.auto_created:
dependencies.append((
field.rel.through._meta.app_label,
field.rel.through._meta.object_name,
None,
True
))
# Depend on our own model being created
dependencies.append((app_label, model_name, None, True))
# Make operation
self.add_operation(
app_label,
operations.AddField(
model_name=model_name,
name=name,
field=field,
),
dependencies=list(set(dependencies)),
)
            # Generate other operations
related_dependencies = [
(app_label, model_name, name, True)
for name, field in sorted(related_fields.items())
]
related_dependencies.append((app_label, model_name, None, True))
if unique_together:
self.add_operation(
app_label,
operations.AlterUniqueTogether(
name=model_name,
unique_together=unique_together,
),
dependencies=related_dependencies
)
if index_together:
self.add_operation(
app_label,
operations.AlterIndexTogether(
name=model_name,
index_together=index_together,
),
dependencies=related_dependencies
)
if order_with_respect_to:
self.add_operation(
app_label,
operations.AlterOrderWithRespectTo(
name=model_name,
order_with_respect_to=order_with_respect_to,
),
dependencies=[
(app_label, model_name, order_with_respect_to, True),
(app_label, model_name, None, True),
]
)
def generate_created_proxies(self):
"""
Makes CreateModel statements for proxy models.
        We use the same CreateModel statements as for concrete models, since that
        way there's less code duplication, but for proxy models we can skip all the
        pointless field stuff and just chuck out an operation.
"""
added = set(self.new_proxy_keys) - set(self.old_proxy_keys)
for app_label, model_name in sorted(added):
model_state = self.to_state.models[app_label, model_name]
assert model_state.options.get("proxy", False)
# Depend on the deletion of any possible non-proxy version of us
dependencies = [
(app_label, model_name, None, False),
]
# Depend on all bases
for base in model_state.bases:
if isinstance(base, six.string_types) and "." in base:
base_app_label, base_name = base.split(".", 1)
dependencies.append((base_app_label, base_name, None, True))
# Generate creation operation
self.add_operation(
app_label,
operations.CreateModel(
name=model_state.name,
fields=[],
options=model_state.options,
bases=model_state.bases,
),
# Depend on the deletion of any possible non-proxy version of us
dependencies=dependencies,
)
def generate_deleted_models(self):
"""
Find all deleted models (managed and unmanaged) and make delete
operations for them as well as separate operations to delete any
foreign key or M2M relationships (we'll optimize these back in later
if we can).
We also bring forward removal of any model options that refer to
collections of fields - the inverse of generate_created_models().
"""
deleted_models = set(self.old_model_keys) - set(self.new_model_keys)
deleted_unmanaged_models = set(self.old_unmanaged_keys) - set(self.new_unmanaged_keys)
models = chain(sorted(deleted_models), sorted(deleted_unmanaged_models))
for app_label, model_name in models:
model_state = self.from_state.models[app_label, model_name]
model = self.old_apps.get_model(app_label, model_name)
if not model._meta.managed:
self.add_operation(
app_label,
operations.DeleteModel(
name=model_state.name,
),
)
# Skip here, no need to handle fields for unmanaged models
continue
# Gather related fields
related_fields = {}
for field in model._meta.local_fields:
if field.rel:
if field.rel.to:
related_fields[field.name] = field
# through will be none on M2Ms on swapped-out models;
# we can treat lack of through as auto_created=True, though.
if getattr(field.rel, "through", None) and not field.rel.through._meta.auto_created:
related_fields[field.name] = field
for field in model._meta.local_many_to_many:
if field.rel.to:
related_fields[field.name] = field
if getattr(field.rel, "through", None) and not field.rel.through._meta.auto_created:
related_fields[field.name] = field
# Generate option removal first
unique_together = model_state.options.pop('unique_together', None)
index_together = model_state.options.pop('index_together', None)
if unique_together:
self.add_operation(
app_label,
operations.AlterUniqueTogether(
name=model_name,
unique_together=None,
)
)
if index_together:
self.add_operation(
app_label,
operations.AlterIndexTogether(
name=model_name,
index_together=None,
)
)
# Then remove each related field
for name, field in sorted(related_fields.items()):
self.add_operation(
app_label,
operations.RemoveField(
model_name=model_name,
name=name,
)
)
# Finally, remove the model.
# This depends on both the removal/alteration of all incoming fields
# and the removal of all its own related fields, and if it's
# a through model the field that references it.
dependencies = []
for related_object in model._meta.get_all_related_objects():
dependencies.append((
related_object.model._meta.app_label,
related_object.model._meta.object_name,
related_object.field.name,
False,
))
dependencies.append((
related_object.model._meta.app_label,
related_object.model._meta.object_name,
related_object.field.name,
"alter",
))
for related_object in model._meta.get_all_related_many_to_many_objects():
dependencies.append((
related_object.model._meta.app_label,
related_object.model._meta.object_name,
related_object.field.name,
False,
))
for name, field in sorted(related_fields.items()):
dependencies.append((app_label, model_name, name, False))
# We're referenced in another field's through=
through_user = self.through_users.get((app_label, model_state.name.lower()), None)
if through_user:
dependencies.append((through_user[0], through_user[1], through_user[2], False))
# Finally, make the operation, deduping any dependencies
self.add_operation(
app_label,
operations.DeleteModel(
name=model_state.name,
),
dependencies=list(set(dependencies)),
)
def generate_deleted_proxies(self):
"""
Makes DeleteModel statements for proxy models.
"""
deleted = set(self.old_proxy_keys) - set(self.new_proxy_keys)
for app_label, model_name in sorted(deleted):
model_state = self.from_state.models[app_label, model_name]
assert model_state.options.get("proxy", False)
self.add_operation(
app_label,
operations.DeleteModel(
name=model_state.name,
),
)
def generate_renamed_fields(self):
"""
Works out renamed fields
"""
self.renamed_fields = {}
for app_label, model_name, field_name in sorted(self.new_field_keys - self.old_field_keys):
old_model_name = self.renamed_models.get((app_label, model_name), model_name)
old_model_state = self.from_state.models[app_label, old_model_name]
field = self.new_apps.get_model(app_label, model_name)._meta.get_field_by_name(field_name)[0]
# Scan to see if this is actually a rename!
field_dec = self.deep_deconstruct(field)
for rem_app_label, rem_model_name, rem_field_name in sorted(self.old_field_keys - self.new_field_keys):
if rem_app_label == app_label and rem_model_name == model_name:
old_field_dec = self.deep_deconstruct(old_model_state.get_field_by_name(rem_field_name))
if field.rel and field.rel.to and 'to' in old_field_dec[2]:
old_rel_to = old_field_dec[2]['to']
if old_rel_to in self.renamed_models_rel:
old_field_dec[2]['to'] = self.renamed_models_rel[old_rel_to]
if old_field_dec == field_dec:
if self.questioner.ask_rename(model_name, rem_field_name, field_name, field):
self.add_operation(
app_label,
operations.RenameField(
model_name=model_name,
old_name=rem_field_name,
new_name=field_name,
)
)
self.old_field_keys.remove((rem_app_label, rem_model_name, rem_field_name))
self.old_field_keys.add((app_label, model_name, field_name))
self.renamed_fields[app_label, model_name, field_name] = rem_field_name
break
def generate_added_fields(self):
"""
Fields that have been added
"""
for app_label, model_name, field_name in sorted(self.new_field_keys - self.old_field_keys):
field = self.new_apps.get_model(app_label, model_name)._meta.get_field_by_name(field_name)[0]
# Fields that are foreignkeys/m2ms depend on stuff
dependencies = []
if field.rel and field.rel.to:
# Account for FKs to swappable models
swappable_setting = getattr(field, 'swappable_setting', None)
if swappable_setting is not None:
dep_app_label = "__setting__"
dep_object_name = swappable_setting
else:
dep_app_label = field.rel.to._meta.app_label
dep_object_name = field.rel.to._meta.object_name
dependencies = [(dep_app_label, dep_object_name, None, True)]
if getattr(field.rel, "through", None) and not field.rel.through._meta.auto_created:
dependencies.append((
field.rel.through._meta.app_label,
field.rel.through._meta.object_name,
None,
True
))
# You can't just add NOT NULL fields with no default
if not field.null and not field.has_default() and not isinstance(field, models.ManyToManyField):
field = field.clone()
field.default = self.questioner.ask_not_null_addition(field_name, model_name)
self.add_operation(
app_label,
operations.AddField(
model_name=model_name,
name=field_name,
field=field,
preserve_default=False,
),
dependencies=dependencies,
)
else:
self.add_operation(
app_label,
operations.AddField(
model_name=model_name,
name=field_name,
field=field,
),
dependencies=dependencies,
)
def generate_removed_fields(self):
"""
Fields that have been removed.
"""
for app_label, model_name, field_name in sorted(self.old_field_keys - self.new_field_keys):
self.add_operation(
app_label,
operations.RemoveField(
model_name=model_name,
name=field_name,
),
# We might need to depend on the removal of an order_with_respect_to;
# this is safely ignored if there isn't one
dependencies=[(app_label, model_name, field_name, "order_wrt_unset")],
)
def generate_altered_fields(self):
"""
Fields that have been altered.
"""
for app_label, model_name, field_name in sorted(self.old_field_keys.intersection(self.new_field_keys)):
# Did the field change?
old_model_name = self.renamed_models.get((app_label, model_name), model_name)
old_field_name = self.renamed_fields.get((app_label, model_name, field_name), field_name)
old_field = self.old_apps.get_model(app_label, old_model_name)._meta.get_field_by_name(old_field_name)[0]
new_field = self.new_apps.get_model(app_label, model_name)._meta.get_field_by_name(field_name)[0]
# Implement any model renames on relations; these are handled by RenameModel
# so we need to exclude them from the comparison
if hasattr(new_field, "rel") and getattr(new_field.rel, "to", None):
rename_key = (
new_field.rel.to._meta.app_label,
new_field.rel.to._meta.object_name.lower(),
)
if rename_key in self.renamed_models:
new_field.rel.to = old_field.rel.to
old_field_dec = self.deep_deconstruct(old_field)
new_field_dec = self.deep_deconstruct(new_field)
if old_field_dec != new_field_dec:
preserve_default = True
if (old_field.null and not new_field.null and not new_field.has_default() and
not isinstance(new_field, models.ManyToManyField)):
field = new_field.clone()
new_default = self.questioner.ask_not_null_alteration(field_name, model_name)
if new_default is not models.NOT_PROVIDED:
field.default = new_default
preserve_default = False
else:
field = new_field
self.add_operation(
app_label,
operations.AlterField(
model_name=model_name,
name=field_name,
field=field,
preserve_default=preserve_default,
)
)
def _generate_altered_foo_together(self, operation):
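        # Shared helper for unique_together / index_together: compares the old value
        # (with any field renames applied) against the new one and emits the given
        # Alter*Together operation only when they differ.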
option_name = operation.option_name
for app_label, model_name in sorted(self.kept_model_keys):
old_model_name = self.renamed_models.get((app_label, model_name), model_name)
old_model_state = self.from_state.models[app_label, old_model_name]
new_model_state = self.to_state.models[app_label, model_name]
# We run the old version through the field renames to account for those
old_value = old_model_state.options.get(option_name) or set()
if old_value:
old_value = set([
tuple(
self.renamed_fields.get((app_label, model_name, n), n)
for n in unique
)
for unique in old_value
])
new_value = new_model_state.options.get(option_name) or set()
if new_value:
new_value = set(new_value)
if old_value != new_value:
self.add_operation(
app_label,
operation(
name=model_name,
**{option_name: new_value}
)
)
def generate_altered_unique_together(self):
self._generate_altered_foo_together(operations.AlterUniqueTogether)
def generate_altered_index_together(self):
self._generate_altered_foo_together(operations.AlterIndexTogether)
def generate_altered_db_table(self):
models_to_check = self.kept_model_keys.union(self.kept_proxy_keys).union(self.kept_unmanaged_keys)
for app_label, model_name in sorted(models_to_check):
old_model_name = self.renamed_models.get((app_label, model_name), model_name)
old_model_state = self.from_state.models[app_label, old_model_name]
new_model_state = self.to_state.models[app_label, model_name]
old_db_table_name = old_model_state.options.get('db_table')
new_db_table_name = new_model_state.options.get('db_table')
if old_db_table_name != new_db_table_name:
self.add_operation(
app_label,
operations.AlterModelTable(
name=model_name,
table=new_db_table_name,
)
)
def generate_altered_options(self):
"""
Works out if any non-schema-affecting options have changed and
makes an operation to represent them in state changes (in case Python
code in migrations needs them)
"""
models_to_check = self.kept_model_keys.union(self.kept_proxy_keys).union(self.kept_unmanaged_keys)
for app_label, model_name in sorted(models_to_check):
old_model_name = self.renamed_models.get((app_label, model_name), model_name)
old_model_state = self.from_state.models[app_label, old_model_name]
new_model_state = self.to_state.models[app_label, model_name]
old_options = dict(
option for option in old_model_state.options.items()
if option[0] in AlterModelOptions.ALTER_OPTION_KEYS
)
new_options = dict(
option for option in new_model_state.options.items()
if option[0] in AlterModelOptions.ALTER_OPTION_KEYS
)
if old_options != new_options:
self.add_operation(
app_label,
operations.AlterModelOptions(
name=model_name,
options=new_options,
)
)
def generate_altered_order_with_respect_to(self):
for app_label, model_name in sorted(self.kept_model_keys):
old_model_name = self.renamed_models.get((app_label, model_name), model_name)
old_model_state = self.from_state.models[app_label, old_model_name]
new_model_state = self.to_state.models[app_label, model_name]
if old_model_state.options.get("order_with_respect_to", None) != new_model_state.options.get("order_with_respect_to", None):
# Make sure it comes second if we're adding
# (removal dependency is part of RemoveField)
dependencies = []
if new_model_state.options.get("order_with_respect_to", None):
dependencies.append((
app_label,
model_name,
new_model_state.options["order_with_respect_to"],
True,
))
# Actually generate the operation
self.add_operation(
app_label,
operations.AlterOrderWithRespectTo(
name=model_name,
order_with_respect_to=new_model_state.options.get('order_with_respect_to', None),
),
dependencies=dependencies,
)
def arrange_for_graph(self, changes, graph):
"""
Takes in a result from changes() and a MigrationGraph,
and fixes the names and dependencies of the changes so they
extend the graph from the leaf nodes for each app.
"""
leaves = graph.leaf_nodes()
name_map = {}
for app_label, migrations in list(changes.items()):
if not migrations:
continue
# Find the app label's current leaf node
app_leaf = None
for leaf in leaves:
if leaf[0] == app_label:
app_leaf = leaf
break
# Do they want an initial migration for this app?
if app_leaf is None and not self.questioner.ask_initial(app_label):
# They don't.
for migration in migrations:
name_map[(app_label, migration.name)] = (app_label, "__first__")
del changes[app_label]
continue
# Work out the next number in the sequence
if app_leaf is None:
next_number = 1
else:
next_number = (self.parse_number(app_leaf[1]) or 0) + 1
# Name each migration
for i, migration in enumerate(migrations):
if i == 0 and app_leaf:
migration.dependencies.append(app_leaf)
if i == 0 and not app_leaf:
new_name = "0001_initial"
else:
new_name = "%04i_%s" % (
next_number,
self.suggest_name(migration.operations)[:100],
)
name_map[(app_label, migration.name)] = (app_label, new_name)
next_number += 1
migration.name = new_name
# Now fix dependencies
for app_label, migrations in changes.items():
for migration in migrations:
migration.dependencies = [name_map.get(d, d) for d in migration.dependencies]
return changes
def _trim_to_apps(self, changes, app_labels):
"""
        Takes changes from arrange_for_graph and a set of app labels, and
        returns a modified set of changes which trims out as many migrations
        as possible that are not in app_labels.
Note that some other migrations may still be present, as they may be
required dependencies.
"""
# Gather other app dependencies in a first pass
app_dependencies = {}
for app_label, migrations in changes.items():
for migration in migrations:
for dep_app_label, name in migration.dependencies:
app_dependencies.setdefault(app_label, set()).add(dep_app_label)
required_apps = set(app_labels)
# Keep resolving till there's no change
old_required_apps = None
while old_required_apps != required_apps:
old_required_apps = set(required_apps)
for app_label in list(required_apps):
required_apps.update(app_dependencies.get(app_label, set()))
# Remove all migrations that aren't needed
for app_label in list(changes.keys()):
if app_label not in required_apps:
del changes[app_label]
return changes
@classmethod
def suggest_name(cls, ops):
"""
Given a set of operations, suggests a name for the migration
they might represent. Names are not guaranteed to be unique,
        but we put some effort into the fallback name to avoid VCS conflicts
if we can.
"""
if len(ops) == 1:
if isinstance(ops[0], operations.CreateModel):
return ops[0].name.lower()
elif isinstance(ops[0], operations.DeleteModel):
return "delete_%s" % ops[0].name.lower()
elif isinstance(ops[0], operations.AddField):
return "%s_%s" % (ops[0].model_name.lower(), ops[0].name.lower())
elif isinstance(ops[0], operations.RemoveField):
return "remove_%s_%s" % (ops[0].model_name.lower(), ops[0].name.lower())
elif len(ops) > 1:
if all(isinstance(o, operations.CreateModel) for o in ops):
return "_".join(sorted(o.name.lower() for o in ops))
return "auto_%s" % datetime.datetime.now().strftime("%Y%m%d_%H%M")
@classmethod
def parse_number(cls, name):
"""
Given a migration name, tries to extract a number from the
beginning of it. If no number found, returns None.
"""
if re.match(r"^\d+_", name):
return int(name.split("_")[0])
return None
| apache-2.0 | 8,536,600,039,370,450,000 | 47.258478 | 166 | 0.536847 | false |
awesto/django-shop | tests/testshop/models.py | 2 | 1146 | from django.db import models
from shop.models.defaults.commodity import Commodity
from shop.models.defaults.cart import Cart
from shop.models.defaults.cart_item import CartItem
from shop.models.defaults.order import Order
from shop.models.order import BaseOrderItem
from shop.models.defaults.delivery import Delivery
from shop.models.defaults.delivery_item import DeliveryItem
from shop.models.defaults.address import BillingAddress, ShippingAddress
from shop.models.defaults.customer import Customer
from shop.models.inventory import BaseInventory, AvailableProductMixin
__all__ = ['Commodity', 'Cart', 'CartItem', 'Order', 'OrderItem', 'Delivery', 'DeliveryItem',
'BillingAddress', 'ShippingAddress', 'Customer']
class OrderItem(BaseOrderItem):
quantity = models.PositiveIntegerField()
canceled = models.BooleanField(default=False)
class MyProduct(AvailableProductMixin, Commodity):
pass
class MyProductInventory(BaseInventory):
product = models.ForeignKey(
MyProduct,
on_delete=models.CASCADE,
related_name='inventory_set',
)
quantity = models.PositiveIntegerField(default=0)
| bsd-3-clause | -7,376,465,464,972,806,000 | 33.727273 | 93 | 0.775742 | false |
cmu-db/cmdbac | blog/models.py | 2 | 8482 | # -*- coding: utf8 -*-
import json
try:
from urllib2 import urlopen # noqa
except ImportError:
from urllib.request import urlopen # noqa
from django.core.exceptions import ImproperlyConfigured
from django.db import models
from django.contrib.auth.models import User
from django.core.urlresolvers import reverse
from django.utils import timezone
from django.utils.encoding import python_2_unicode_compatible
from django.utils.html import strip_tags
from django.contrib.sites.models import Site
try:
import twitter
except ImportError:
twitter = None
import pytz
from .conf import settings
from .managers import PostManager
from .utils import can_tweet
try:
from string import letters
except ImportError:
from string import ascii_letters as letters
from random import choice
def ig(L, i):
for x in L:
yield x[i]
STATES = settings.PINAX_BLOG_UNPUBLISHED_STATES + ["Published"]
PINAX_BLOG_STATE_CHOICES = list(zip(range(1, 1 + len(STATES)), STATES))
@python_2_unicode_compatible
class Section(models.Model):
name = models.CharField(max_length=150, unique=True)
slug = models.SlugField(unique=True)
enabled = models.BooleanField(default=True)
def __str__(self):
return self.name
@python_2_unicode_compatible
class Post(models.Model):
STATE_CHOICES = PINAX_BLOG_STATE_CHOICES
section = models.ForeignKey(Section)
title = models.CharField(max_length=90)
slug = models.SlugField(unique=settings.PINAX_BLOG_SLUG_UNIQUE)
author = models.ForeignKey(User, related_name="posts")
markup = models.CharField(max_length=25, choices=settings.PINAX_BLOG_MARKUP_CHOICES)
teaser_html = models.TextField(editable=False)
content_html = models.TextField(editable=False)
description = models.TextField(blank=True)
primary_image = models.ForeignKey("Image", null=True, blank=True, related_name="+")
tweet_text = models.CharField(max_length=140, editable=False)
created = models.DateTimeField(default=timezone.now, editable=False) # when first revision was created
updated = models.DateTimeField(null=True, blank=True, editable=False) # when last revision was created (even if not published)
published = models.DateTimeField(null=True, blank=True) # when last published
state = models.IntegerField(choices=STATE_CHOICES, default=STATE_CHOICES[0][0])
secret_key = models.CharField(
max_length=8,
blank=True,
unique=True,
help_text="allows url for sharing unpublished posts to unauthenticated users"
)
view_count = models.IntegerField(default=0, editable=False)
@property
def older_post(self):
qs = Post.objects.published()
if self.is_published:
qs = qs.filter(published__lt=self.published)
return next(iter(qs), None)
@property
def newer_post(self):
if self.is_published:
return next(iter(Post.objects.published().order_by("published").filter(published__gt=self.published)), None)
@property
def is_published(self):
return self.state == PINAX_BLOG_STATE_CHOICES[-1][0]
@property
def meta_description(self):
if self.description:
return self.description
else:
return strip_tags(self.teaser_html)
@property
def meta_image(self):
if self.primary_image:
return self.primary_image.image_path.url
def rev(self, rev_id):
return self.revisions.get(pk=rev_id)
def current(self):
"the currently visible (latest published) revision"
return self.revisions.exclude(published=None).order_by("-published")[0]
def latest(self):
"the latest modified (even if not published) revision"
try:
return self.revisions.order_by("-updated")[0]
except IndexError:
return None
class Meta:
ordering = ("-published",)
get_latest_by = "published"
objects = PostManager()
def __str__(self):
return self.title
def as_tweet(self):
if not self.tweet_text:
current_site = Site.objects.get_current()
api_url = "http://api.tr.im/api/trim_url.json"
u = urlopen("%s?url=http://%s%s" % (
api_url,
current_site.domain,
self.get_absolute_url(),
))
result = json.loads(u.read())
self.tweet_text = "%s %s — %s" % (
settings.TWITTER_TWEET_PREFIX,
self.title,
result["url"],
)
return self.tweet_text
def tweet(self):
if can_tweet():
account = twitter.Api(
username=settings.TWITTER_USERNAME,
password=settings.TWITTER_PASSWORD,
)
account.PostUpdate(self.as_tweet())
else:
raise ImproperlyConfigured(
"Unable to send tweet due to either "
"missing python-twitter or required settings."
)
def save(self, **kwargs):
        self.updated = timezone.now()
if not self.secret_key:
# Generate a random secret key
self.secret_key = "".join(choice(letters) for _ in range(8))
if self.is_published and self.published is None:
self.published = timezone.now()
super(Post, self).save(**kwargs)
@property
def sharable_url(self):
"""
        A URL to reach this post (there is a secret URL for sharing unpublished
        posts with outside users).
"""
if not self.is_published:
if self.secret_key:
return reverse("blog_post_secret", kwargs={"post_secret_key": self.secret_key})
else:
return "A secret sharable url for non-authenticated users is generated when you save this post."
else:
return self.get_absolute_url()
def get_absolute_url(self):
if self.is_published:
if settings.PINAX_BLOG_SLUG_UNIQUE:
name = "blog_post_slug"
kwargs = {
"post_slug": self.slug
}
else:
name = "blog_post"
if settings.USE_TZ and settings.TIME_ZONE:
published = pytz.timezone(settings.TIME_ZONE).normalize(self.published)
else:
published = self.published
kwargs = {
"year": published.strftime("%Y"),
"month": published.strftime("%m"),
"day": published.strftime("%d"),
"slug": self.slug,
}
else:
name = "blog_post_pk"
kwargs = {
"post_pk": self.pk,
}
return reverse(name, kwargs=kwargs)
def inc_views(self):
self.view_count += 1
self.save()
self.current().inc_views()
@python_2_unicode_compatible
class Revision(models.Model):
post = models.ForeignKey(Post, related_name="revisions")
title = models.CharField(max_length=90)
teaser = models.TextField()
content = models.TextField()
author = models.ForeignKey(User, related_name="revisions")
updated = models.DateTimeField(default=timezone.now)
published = models.DateTimeField(null=True, blank=True)
view_count = models.IntegerField(default=0, editable=False)
def __str__(self):
return "Revision %s for %s" % (self.updated.strftime('%Y%m%d-%H%M'), self.post.slug)
def inc_views(self):
self.view_count += 1
self.save()
@python_2_unicode_compatible
class Image(models.Model):
post = models.ForeignKey(Post, related_name="images")
image_path = models.ImageField(upload_to="images/%Y/%m/%d")
url = models.CharField(max_length=150, blank=True)
timestamp = models.DateTimeField(default=timezone.now, editable=False)
def __str__(self):
if self.pk is not None:
return "{{ %d }}" % self.pk
else:
return "deleted image"
class FeedHit(models.Model):
request_data = models.TextField()
created = models.DateTimeField(default=timezone.now)
class ReviewComment(models.Model):
post = models.ForeignKey(Post, related_name="review_comments")
review_text = models.TextField()
timestamp = models.DateTimeField(default=timezone.now)
addressed = models.BooleanField(default=False)
| apache-2.0 | -1,693,174,225,490,352,400 | 29.394265 | 131 | 0.617571 | false |
RydrDojo/Ridr | pylotVenv/lib/python2.7/site-packages/alembic/testing/plugin/noseplugin.py | 15 | 2756 | # plugin/noseplugin.py
# Copyright (C) 2005-2016 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""
Enhance nose with extra options and behaviors for running SQLAlchemy tests.
NOTE: copied/adapted from SQLAlchemy master for backwards compatibility;
this should be removable when Alembic targets SQLAlchemy 1.0.0.
"""
try:
# installed by bootstrap.py
import alembic_plugin_base as plugin_base
except ImportError:
# assume we're a package, use traditional import
from . import plugin_base
import os
import sys
from nose.plugins import Plugin
fixtures = None
py3k = sys.version_info >= (3, 0)
class NoseSQLAlchemy(Plugin):
enabled = True
name = 'sqla_testing'
score = 100
def options(self, parser, env=os.environ):
Plugin.options(self, parser, env)
opt = parser.add_option
def make_option(name, **kw):
callback_ = kw.pop("callback", None)
if callback_:
def wrap_(option, opt_str, value, parser):
callback_(opt_str, value, parser)
kw["callback"] = wrap_
opt(name, **kw)
plugin_base.setup_options(make_option)
plugin_base.read_config()
def configure(self, options, conf):
super(NoseSQLAlchemy, self).configure(options, conf)
plugin_base.pre_begin(options)
plugin_base.set_coverage_flag(options.enable_plugin_coverage)
def begin(self):
global fixtures
from alembic.testing import fixtures # noqa
plugin_base.post_begin()
def describeTest(self, test):
return ""
def wantFunction(self, fn):
return False
def wantMethod(self, fn):
if py3k:
if not hasattr(fn.__self__, 'cls'):
return False
cls = fn.__self__.cls
else:
cls = fn.im_class
return plugin_base.want_method(cls, fn)
def wantClass(self, cls):
return plugin_base.want_class(cls)
def beforeTest(self, test):
plugin_base.before_test(
test,
test.test.cls.__module__,
test.test.cls, test.test.method.__name__)
def afterTest(self, test):
plugin_base.after_test(test)
def startContext(self, ctx):
if not isinstance(ctx, type) \
or not issubclass(ctx, fixtures.TestBase):
return
plugin_base.start_test_class(ctx)
def stopContext(self, ctx):
if not isinstance(ctx, type) \
or not issubclass(ctx, fixtures.TestBase):
return
plugin_base.stop_test_class(ctx)
| mit | 3,691,909,365,062,004,000 | 25.757282 | 75 | 0.616473 | false |
eufarn7sp/egads-gui | ui/Ui_downloadwindow.py | 1 | 10275 | # -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'downloadupdatewindow.ui'
#
# Created by: PyQt5 UI code generator 5.6
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_downloadWindow(object):
def setupUi(self, downloadWindow):
downloadWindow.setObjectName("downloadWindow")
downloadWindow.resize(500, 230)
downloadWindow.setMinimumSize(QtCore.QSize(500, 230))
downloadWindow.setMaximumSize(QtCore.QSize(500, 230))
font = QtGui.QFont()
font.setFamily("fonts/SourceSansPro-Regular.ttf")
font.setPointSize(10)
font.setKerning(True)
font.setStyleStrategy(QtGui.QFont.PreferAntialias)
downloadWindow.setFont(font)
icon = QtGui.QIcon()
icon.addPixmap(QtGui.QPixmap("icons/info_popup_icon.svg"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
downloadWindow.setWindowIcon(icon)
downloadWindow.setStyleSheet("QWidget {\n"
" background-color: rgb(230,230,230);\n"
"}")
self.widget = QtWidgets.QWidget(downloadWindow)
self.widget.setGeometry(QtCore.QRect(11, 11, 478, 208))
self.widget.setObjectName("widget")
self.verticalLayout_3 = QtWidgets.QVBoxLayout(self.widget)
self.verticalLayout_3.setContentsMargins(0, 0, 0, 0)
self.verticalLayout_3.setObjectName("verticalLayout_3")
self.horizontalLayout_3 = QtWidgets.QHBoxLayout()
self.horizontalLayout_3.setObjectName("horizontalLayout_3")
self.verticalLayout = QtWidgets.QVBoxLayout()
self.verticalLayout.setObjectName("verticalLayout")
self.dw_label_2 = QtWidgets.QLabel(self.widget)
self.dw_label_2.setMinimumSize(QtCore.QSize(50, 50))
self.dw_label_2.setMaximumSize(QtCore.QSize(50, 50))
self.dw_label_2.setText("")
self.dw_label_2.setPixmap(QtGui.QPixmap("icons/info_popup_icon.svg"))
self.dw_label_2.setScaledContents(True)
self.dw_label_2.setObjectName("dw_label_2")
self.verticalLayout.addWidget(self.dw_label_2)
spacerItem = QtWidgets.QSpacerItem(20, 40, QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Expanding)
self.verticalLayout.addItem(spacerItem)
self.horizontalLayout_3.addLayout(self.verticalLayout)
spacerItem1 = QtWidgets.QSpacerItem(20, 20, QtWidgets.QSizePolicy.Fixed, QtWidgets.QSizePolicy.Minimum)
self.horizontalLayout_3.addItem(spacerItem1)
self.verticalLayout_2 = QtWidgets.QVBoxLayout()
self.verticalLayout_2.setObjectName("verticalLayout_2")
self.dw_label_1 = QtWidgets.QLabel(self.widget)
self.dw_label_1.setMinimumSize(QtCore.QSize(0, 0))
self.dw_label_1.setMaximumSize(QtCore.QSize(16777215, 16777215))
font = QtGui.QFont()
font.setFamily("fonts/SourceSansPro-Regular.ttf")
font.setPointSize(10)
font.setKerning(True)
font.setStyleStrategy(QtGui.QFont.PreferAntialias)
self.dw_label_1.setFont(font)
self.dw_label_1.setStyleSheet("QLabel {\n"
" color: black;\n"
"}")
self.dw_label_1.setFrameShape(QtWidgets.QFrame.NoFrame)
self.dw_label_1.setFrameShadow(QtWidgets.QFrame.Plain)
self.dw_label_1.setLineWidth(0)
self.dw_label_1.setMidLineWidth(0)
self.dw_label_1.setTextFormat(QtCore.Qt.AutoText)
self.dw_label_1.setAlignment(QtCore.Qt.AlignJustify|QtCore.Qt.AlignTop)
self.dw_label_1.setWordWrap(True)
self.dw_label_1.setObjectName("dw_label_1")
self.verticalLayout_2.addWidget(self.dw_label_1)
spacerItem2 = QtWidgets.QSpacerItem(20, 10, QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Fixed)
self.verticalLayout_2.addItem(spacerItem2)
self.horizontalLayout = QtWidgets.QHBoxLayout()
self.horizontalLayout.setObjectName("horizontalLayout")
spacerItem3 = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
self.horizontalLayout.addItem(spacerItem3)
self.dw_downloadButton = QtWidgets.QToolButton(self.widget)
self.dw_downloadButton.setMinimumSize(QtCore.QSize(160, 27))
self.dw_downloadButton.setMaximumSize(QtCore.QSize(160, 27))
font = QtGui.QFont()
font.setFamily("fonts/SourceSansPro-Regular.ttf")
font.setPointSize(10)
font.setKerning(True)
font.setStyleStrategy(QtGui.QFont.PreferAntialias)
self.dw_downloadButton.setFont(font)
self.dw_downloadButton.setStyleSheet("QToolButton {\n"
" border: 1px solid #acacac;\n"
" border-radius: 1px;\n"
" background-color: qlineargradient(x1: 0, y1: 0, x2: 0, y2: 1, \n"
" stop: 0 #f0f0f0, stop: 1 #e5e5e5);\n"
" color: black;\n"
"}\n"
"\n"
"QToolButton:hover {\n"
" border: 1px solid #7eb4ea;\n"
" border-radius: 1px;\n"
" background-color: qlineargradient(x1: 0, y1: 0, x2: 0, y2: 1, \n"
" stop: 0 #ecf4fc, stop: 1 #dcecfc);\n"
"}\n"
"\n"
"\n"
"QToolButton:pressed {\n"
" border: 1px solid #579de5;\n"
" border-radius: 1px;\n"
" background-color: qlineargradient(x1: 0, y1: 0, x2: 0, y2: 1,\n"
" stop: 0 #daecfc, stop: 1 #c4e0fc);\n"
"}")
self.dw_downloadButton.setObjectName("dw_downloadButton")
self.horizontalLayout.addWidget(self.dw_downloadButton)
spacerItem4 = QtWidgets.QSpacerItem(20, 20, QtWidgets.QSizePolicy.Fixed, QtWidgets.QSizePolicy.Minimum)
self.horizontalLayout.addItem(spacerItem4)
self.dw_downloadButton_2 = QtWidgets.QToolButton(self.widget)
self.dw_downloadButton_2.setMinimumSize(QtCore.QSize(140, 27))
self.dw_downloadButton_2.setMaximumSize(QtCore.QSize(140, 27))
font = QtGui.QFont()
font.setFamily("fonts/SourceSansPro-Regular.ttf")
font.setPointSize(10)
font.setKerning(True)
font.setStyleStrategy(QtGui.QFont.PreferAntialias)
self.dw_downloadButton_2.setFont(font)
self.dw_downloadButton_2.setStyleSheet("QToolButton {\n"
" border: 1px solid #acacac;\n"
" border-radius: 1px;\n"
" background-color: qlineargradient(x1: 0, y1: 0, x2: 0, y2: 1, \n"
" stop: 0 #f0f0f0, stop: 1 #e5e5e5);\n"
" color: black;\n"
"}\n"
"\n"
"QToolButton:hover {\n"
" border: 1px solid #7eb4ea;\n"
" border-radius: 1px;\n"
" background-color: qlineargradient(x1: 0, y1: 0, x2: 0, y2: 1, \n"
" stop: 0 #ecf4fc, stop: 1 #dcecfc);\n"
"}\n"
"\n"
"\n"
"QToolButton:pressed {\n"
" border: 1px solid #579de5;\n"
" border-radius: 1px;\n"
" background-color: qlineargradient(x1: 0, y1: 0, x2: 0, y2: 1,\n"
" stop: 0 #daecfc, stop: 1 #c4e0fc);\n"
"}")
self.dw_downloadButton_2.setObjectName("dw_downloadButton_2")
self.horizontalLayout.addWidget(self.dw_downloadButton_2)
spacerItem5 = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
self.horizontalLayout.addItem(spacerItem5)
self.verticalLayout_2.addLayout(self.horizontalLayout)
self.horizontalLayout_3.addLayout(self.verticalLayout_2)
self.verticalLayout_3.addLayout(self.horizontalLayout_3)
spacerItem6 = QtWidgets.QSpacerItem(20, 17, QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Expanding)
self.verticalLayout_3.addItem(spacerItem6)
self.horizontalLayout_2 = QtWidgets.QHBoxLayout()
self.horizontalLayout_2.setObjectName("horizontalLayout_2")
spacerItem7 = QtWidgets.QSpacerItem(20, 20, QtWidgets.QSizePolicy.Fixed, QtWidgets.QSizePolicy.Minimum)
self.horizontalLayout_2.addItem(spacerItem7)
self.dw_okButton = QtWidgets.QToolButton(self.widget)
self.dw_okButton.setMinimumSize(QtCore.QSize(93, 27))
self.dw_okButton.setMaximumSize(QtCore.QSize(93, 27))
font = QtGui.QFont()
font.setFamily("fonts/SourceSansPro-Regular.ttf")
font.setPointSize(10)
font.setKerning(True)
font.setStyleStrategy(QtGui.QFont.PreferAntialias)
self.dw_okButton.setFont(font)
self.dw_okButton.setStyleSheet("QToolButton {\n"
" border: 1px solid #acacac;\n"
" border-radius: 1px;\n"
" background-color: qlineargradient(x1: 0, y1: 0, x2: 0, y2: 1, \n"
" stop: 0 #f0f0f0, stop: 1 #e5e5e5);\n"
" color: black;\n"
"}\n"
"\n"
"QToolButton:hover {\n"
" border: 1px solid #7eb4ea;\n"
" border-radius: 1px;\n"
" background-color: qlineargradient(x1: 0, y1: 0, x2: 0, y2: 1, \n"
" stop: 0 #ecf4fc, stop: 1 #dcecfc);\n"
"}\n"
"\n"
"\n"
"QToolButton:pressed {\n"
" border: 1px solid #579de5;\n"
" border-radius: 1px;\n"
" background-color: qlineargradient(x1: 0, y1: 0, x2: 0, y2: 1,\n"
" stop: 0 #daecfc, stop: 1 #c4e0fc);\n"
"}")
self.dw_okButton.setObjectName("dw_okButton")
self.horizontalLayout_2.addWidget(self.dw_okButton)
spacerItem8 = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
self.horizontalLayout_2.addItem(spacerItem8)
self.verticalLayout_3.addLayout(self.horizontalLayout_2)
self.retranslateUi(downloadWindow)
QtCore.QMetaObject.connectSlotsByName(downloadWindow)
def retranslateUi(self, downloadWindow):
_translate = QtCore.QCoreApplication.translate
downloadWindow.setWindowTitle(_translate("downloadWindow", "Information"))
self.dw_label_1.setText(_translate("downloadWindow", "<html><head/><body><p>A new version of the EGADS GUI is available on GitHub. Click on <span style=\" font-weight:600;\">Download update</span> to download it, or click on <span style=\" font-weight:600;\">Visit GitHub</span> to have a look at the EGADS GUI repository on GitHub.</p></body></html>"))
self.dw_downloadButton.setText(_translate("downloadWindow", "Download update"))
self.dw_downloadButton_2.setText(_translate("downloadWindow", "Visit GitHub"))
self.dw_okButton.setText(_translate("downloadWindow", "Ok"))
| gpl-3.0 | -3,914,230,850,875,428,400 | 48.637681 | 361 | 0.666959 | false |
andrew-aladev/samba-talloc-debug | buildtools/wafsamba/samba_wildcard.py | 34 | 4534 | # based on playground/evil in the waf svn tree
import os, datetime
import Scripting, Utils, Options, Logs, Environment, fnmatch
from Constants import *
from samba_utils import *
def run_task(t, k):
'''run a single build task'''
ret = t.run()
if ret:
raise Utils.WafError("Failed to build %s: %u" % (k, ret))
def run_named_build_task(cmd):
'''run a named build task, matching the cmd name using fnmatch
wildcards against inputs and outputs of all build tasks'''
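    # Illustrative: a target such as "bin/smbd" or a pattern like "*/*.o" is
    # fnmatch-ed against every task's inputs and outputs, relative both to the
    # current directory and to the source tree root.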
bld = fake_build_environment(info=False)
found = False
cwd_node = bld.root.find_dir(os.getcwd())
top_node = bld.root.find_dir(bld.srcnode.abspath())
cmd = os.path.normpath(cmd)
# cope with builds of bin/*/*
if os.path.islink(cmd):
cmd = os_path_relpath(os.readlink(cmd), os.getcwd())
if cmd[0:12] == "bin/default/":
cmd = cmd[12:]
for g in bld.task_manager.groups:
for attr in ['outputs', 'inputs']:
for t in g.tasks:
s = getattr(t, attr, [])
for k in s:
relpath1 = k.relpath_gen(cwd_node)
relpath2 = k.relpath_gen(top_node)
if (fnmatch.fnmatch(relpath1, cmd) or
fnmatch.fnmatch(relpath2, cmd)):
t.position = [0,0]
print(t.display())
run_task(t, k)
found = True
if not found:
raise Utils.WafError("Unable to find build target matching %s" % cmd)
def rewrite_compile_targets():
'''cope with the bin/ form of compile target'''
if not Options.options.compile_targets:
return
bld = fake_build_environment(info=False)
targets = LOCAL_CACHE(bld, 'TARGET_TYPE')
tlist = []
for t in Options.options.compile_targets.split(','):
if not os.path.islink(t):
tlist.append(t)
continue
link = os.readlink(t)
list = link.split('/')
for name in [list[-1], '/'.join(list[-2:])]:
if name in targets:
tlist.append(name)
continue
Options.options.compile_targets = ",".join(tlist)
def wildcard_main(missing_cmd_fn):
'''this replaces main from Scripting, allowing us to override the
behaviour for unknown commands
If a unknown command is found, then missing_cmd_fn() is called with
the name of the requested command
'''
Scripting.commands = Options.arg_line[:]
# rewrite the compile targets to cope with the bin/xx form
rewrite_compile_targets()
while Scripting.commands:
x = Scripting.commands.pop(0)
ini = datetime.datetime.now()
if x == 'configure':
fun = Scripting.configure
elif x == 'build':
fun = Scripting.build
else:
fun = getattr(Utils.g_module, x, None)
# this is the new addition on top of main from Scripting.py
if not fun:
missing_cmd_fn(x)
break
ctx = getattr(Utils.g_module, x + '_context', Utils.Context)()
if x in ['init', 'shutdown', 'dist', 'distclean', 'distcheck']:
try:
fun(ctx)
except TypeError:
fun()
else:
fun(ctx)
ela = ''
if not Options.options.progress_bar:
ela = ' (%s)' % Utils.get_elapsed_time(ini)
if x != 'init' and x != 'shutdown':
Logs.info('%r finished successfully%s' % (x, ela))
if not Scripting.commands and x != 'shutdown':
Scripting.commands.append('shutdown')
def fake_build_environment(info=True, flush=False):
"""create all the tasks for the project, but do not run the build
return the build context in use"""
bld = getattr(Utils.g_module, 'build_context', Utils.Context)()
bld = Scripting.check_configured(bld)
Options.commands['install'] = False
Options.commands['uninstall'] = False
Options.is_install = False
bld.is_install = 0 # False
try:
proj = Environment.Environment(Options.lockfile)
except IOError:
raise Utils.WafError("Project not configured (run 'waf configure' first)")
bld.load_dirs(proj[SRCDIR], proj[BLDDIR])
bld.load_envs()
if info:
Logs.info("Waf: Entering directory `%s'" % bld.bldnode.abspath())
bld.add_subdirs([os.path.split(Utils.g_module.root_path)[0]])
bld.pre_build()
if flush:
bld.flush()
return bld
| gpl-3.0 | -2,839,639,961,722,802,000 | 28.633987 | 82 | 0.574989 | false |
SmartDeveloperHub/gitlab-api-generator | utils/parse.py | 1 | 5885 | """
#-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=#
This file is part of the Smart Developer Hub Project:
http://www.smartdeveloperhub.org
Center for Open Middleware
http://www.centeropenmiddleware.com/
#-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=#
Copyright (C) 2015 Center for Open Middleware.
#-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=#
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
#-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=#
"""
import os
import settings
from HTMLParser import HTMLParser
__author__ = 'Alejandro F. Carrera'
def check_api_call(string):
# Remove examples
if "?" in str(string):
return False
# Only detect 4 HTTP Methods
call = ["GET", "POST", "PUT", "DELETE"]
for i in call:
if str(string).startswith(i):
return True
return False
def generate_name_from_metadata(method, string, param):
__name = method.lower()
if __name == "put":
__name = "modify"
elif __name == "post":
__name = "create"
else:
pass
__string = string
for i in param:
__string = __string.replace("/:" + i, "")
__string = __string.split("/")
for i in __string:
__name += ("_" + i)
for i in param:
j = i
if "_" in j:
j = i.split("_")
j[0] = j[0].capitalize()
j[1] = j[1].capitalize()
__name += ("_by" + j[0] + j[1])
else:
__name += ("_by" + j.capitalize())
return __name
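# Illustrative example (not part of the original module): for a documented call
# such as "GET /projects/:id/members",
#   generate_name_from_metadata("GET", "projects/:id/members", {"id": {}})
# yields "get_projects_members_byId".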
def generate_param_from_metadata(string):
__string_arr = string.split(":")
__string_arr.pop(0)
__string_res = {}
for i in __string_arr:
if "/" in i:
__string_res[(i.split("/")[0])] = {}
else:
__string_res[i] = {}
return __string_res
def generate_metadata(string):
__string = str(string)
# Remove Whitespaces
__string.strip()
__string = " ".join(__string.split())
__string = __string.replace(" ", "")
# Get Method and Parse
__methods = ["GET", "POST", "PUT", "DELETE"]
__match = [s for s in __methods if __string.startswith(s)]
if len(__match) > 0:
__match = __match[0]
__string = __string.replace(__match + "/", "")
__parameters = generate_param_from_metadata(__string)
__name = generate_name_from_metadata(__match, __string, __parameters)
return {
"string": __string,
"name": __name,
"method": __match,
"url_param": __parameters,
"url_param_number": len(__parameters.keys()),
"spec_param": {},
"spec_param_number": 0
}
return {}
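# Illustrative example (not part of the original module): for the documented call
# "GET /projects/:id", generate_metadata would return roughly
#   {"string": "projects/:id", "name": "get_projects_byId", "method": "GET",
#    "url_param": {"id": {}}, "url_param_number": 1,
#    "spec_param": {}, "spec_param_number": 0}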
def generate_metadata_parameter(api, param, string):
__url_parameters = api.get("url_param").keys()
if "-" in string:
__str_split = str(string).split(" - ")
if __str_split[1] == "if ":
__desc = ""
else:
__desc = __str_split[1]
__req = __str_split[0] == " (required)"
else:
__desc = ""
__req = str(string) == " (required)"
if param in __url_parameters:
api["url_param"][param] = __desc
else:
api["spec_param"][param] = {
"description": __desc,
"required": __req
}
api["spec_param_number"] += 1
class CUSTOM_PARSE(HTMLParser):
pre_tag = None
def __init__(self):
self.reset()
self.actual_tag = None
self.actual_api = None
self.name = None
self.param_mode = False
self.param_data = None
self.api = {}
def handle_starttag(self, tag, attrs):
self.actual_tag = tag
def handle_endtag(self, tag):
if tag == "ul":
self.param_mode = False
def handle_data(self, data):
if self.actual_tag == "p" and data == "Parameters:":
self.param_mode = True
if self.actual_tag == "code" and check_api_call(data):
md = generate_metadata(data)
self.actual_api = md.get("name")
self.api[self.actual_api] = md
self.param_mode = False
if self.actual_tag == "code" and self.param_mode and not \
str(data).startswith(" (") and str(data) != "\n":
self.param_data = data
elif self.actual_tag == "code" and self.param_mode and str(data).startswith(" ("):
generate_metadata_parameter(self.api[self.actual_api], self.param_data, data)
self.param_data = None
else:
pass
def generate_code_from_file(file_name, file_path):
# Get function
name_lo = str(file_name).replace(".html", "").lower()
# Get file
fi = open(file_path, 'r')
# Parse file
parser = CUSTOM_PARSE()
parser.name = name_lo
parser.feed(fi.read())
return parser.api
def generate_meta_code(file_dir):
md = {}
settings.print_message(" - Generating metadata from html docs ... ")
for i in os.listdir(file_dir):
gen_code = generate_code_from_file(i, file_dir + "/" + i)
for j in gen_code:
if j in md.keys():
settings.print_message(" * Duplicated at [" + i + "]: " + md[j].get("string"))
else:
md[j] = gen_code[j]
return md
| apache-2.0 | 2,813,399,810,699,985,000 | 29.335052 | 94 | 0.513849 | false |
film42/bitcoin-exchange | bitcoin_exchange/settings_application.py | 1 | 1658 | """
Django settings for bitcoin_exchange project.
For more information on this file, see
https://docs.djangoproject.com/en/1.7/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.7/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.7/howto/deployment/checklist/
# Application definition
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'django_extensions',
'rest_framework',
'djcelery',
'kombu.transport.django',
'exchange',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'bitcoin_exchange.urls'
WSGI_APPLICATION = 'bitcoin_exchange.wsgi.application'
AUTH_USER_MODEL = 'exchange.User'
# Template directory
TEMPLATE_DIRS = [os.path.join(BASE_DIR, 'templates')]
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.7/howto/static-files/
STATIC_URL = '/static/'
STATICFILES_DIRS = (
os.path.join(BASE_DIR, "static"),
)
| mit | 801,520,769,011,760,000 | 26.180328 | 71 | 0.734017 | false |
Hearen/OnceServer | Server/utils/Tools.py | 1 | 3028 | '''
Author : LHearen
E-mail : [email protected]
Time : 2015-12-16 10 : 28
Description : Used to assist other modules;
'''
from flask import make_response
from utils.OnceLogging import log, init
import subprocess
init("/var/log/xen/libvirt.log", "DEBUG", log)
def moduleLoader(packageName, moduleName):
'''
Author: LHearen
E-mail: [email protected]
Time : 2015-12-15 15:19
Description: Used to load a module from the package
will only return the module but not introduce
the module to the current context unlike importlib;
'''
package = __import__(packageName, globals(), locals(), moduleName.split(), -1)
return getattr(package, moduleName)
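# Illustrative example (not part of the original module; "Connection" is a
# hypothetical module name):
#   Connection = moduleLoader("utils", "Connection")
# returns the utils.Connection module object without binding it into the
# caller's namespace the way a plain "import" would.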
def dumpRequest(request):
'''
Author : DBear
Time : 2015-12-15 15 : 33
Description : Present the details of the coming request;
'''
request_detail = """
request.endpoint:{request.endpoint}
request.method:{request.method}
request.view_args:{request.view_args}
request.args:{request.args}
request.form:{request.form}
request.user_agent:{request.user_agent}
request.files:{request.files}
request.is_xhr:{request.is_xhr}
{request.headers}""".format(request=request).strip()
return request_detail
def errorResponseMaker():
'''
Author : LHearen
E-mail : [email protected]
Time : 2015-12-15 15 : 30
Description : Used to handle exception response;
'''
headers = {'Content-Type':'text/plain'}
return make_response("User function failed", 403, headers)
def responseMaker(content):
'''
Author : LHearen
E-mail : [email protected]
Time : 2016-01-05 11:18
    Description : Used to handle methods' response except create* ones;
'''
headers = {'Content-Type':'text/plain'}
return make_response(str(content), 200, headers)
def logNotFound(objectName, NameOrId, message):
'''
Author : LHearen
E-mail : [email protected]
Time : 2016-01-07 11:09
Description : Used to log not found error in libvirt.log;
'''
log.debug("%s %s Not Found! Message: %s" % (objectName, NameOrId, message))
def executeShellCommand(commands):
'''
Author : LHearen
E-mail : [email protected]
Time : 2016-01-11 14:37
    Description : Used to execute a shell command and return output from stdout;
'''
process = subprocess.Popen(commands, shell=True, \
stdout=subprocess.PIPE)
ret = process.communicate()
return ret
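# Illustrative example (not part of the original module):
#   out, err = executeShellCommand("ls /var/log")
# "out" holds the command's stdout as a string; "err" is None because stderr is
# not piped here.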
def executeShellScripts(shellDir, executor='/bin/bash'):
'''
Author : LHearen
E-mail : [email protected]
Time : 2016-01-11 14:37
Description : Used to execute a shell script and return output from stdout;
'''
process = subprocess.Popen(shellDir, shell=False, \
stdout=subprocess.PIPE)
ret = process.communicate()
return ret
| mit | 2,404,669,267,171,790,300 | 30.873684 | 82 | 0.61889 | false |
gpetretto/pymatgen | pymatgen/analysis/structure_matcher.py | 6 | 43455 | # coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
"""
This module provides classes to perform fitting of structures.
"""
from __future__ import division, unicode_literals
import six
from six.moves import filter
from six.moves import zip
import numpy as np
import itertools
import abc
from monty.json import MSONable
from pymatgen.core.structure import Structure
from pymatgen.core.lattice import Lattice
from pymatgen.core.composition import Composition
from pymatgen.core.periodic_table import get_el_sp
from pymatgen.optimization.linear_assignment import LinearAssignment
from pymatgen.util.coord_cython import pbc_shortest_vectors, is_coord_subset_pbc
from pymatgen.util.coord import lattice_points_in_supercell
__author__ = "William Davidson Richards, Stephen Dacek, Shyue Ping Ong"
__copyright__ = "Copyright 2011, The Materials Project"
__version__ = "1.0"
__maintainer__ = "William Davidson Richards"
__email__ = "[email protected]"
__status__ = "Production"
__date__ = "Dec 3, 2012"
class AbstractComparator(six.with_metaclass(abc.ABCMeta, MSONable)):
"""
Abstract Comparator class. A Comparator defines how sites are compared in
a structure.
"""
@abc.abstractmethod
def are_equal(self, sp1, sp2):
"""
Defines how the species of two sites are considered equal. For
example, one can consider sites to have the same species only when
the species are exactly the same, i.e., Fe2+ matches Fe2+ but not
Fe3+. Or one can define that only the element matters,
and all oxidation state information are ignored.
Args:
sp1: First species. A dict of {specie/element: amt} as per the
definition in Site and PeriodicSite.
sp2: Second species. A dict of {specie/element: amt} as per the
definition in Site and PeriodicSite.
Returns:
Boolean indicating whether species are considered equal.
"""
return
@abc.abstractmethod
def get_hash(self, composition):
"""
Defines a hash to group structures. This allows structures to be
grouped efficiently for comparison. The hash must be invariant under
supercell creation. (e.g. composition is not a good hash, but
        fractional_composition might be). Reduced formula is not a good hash,
due to weird behavior with fractional occupancy.
Composition is used here instead of structure because for anonymous
matches it is much quicker to apply a substitution to a composition
object than a structure object.
Args:
composition (Composition): composition of the structure
Returns:
A hashable object. Examples can be string formulas, integers etc.
"""
return
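    # Illustrative note (not part of the original class): fractional composition
    # is invariant under supercell creation, e.g.
    #   Composition("Fe2O3").fractional_composition == \
    #       Composition("Fe4O6").fractional_composition   # -> True
    # which is what makes it usable as a hash here.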
@classmethod
def from_dict(cls, d):
for trans_modules in ['structure_matcher']:
mod = __import__('pymatgen.analysis.' + trans_modules,
globals(), locals(), [d['@class']], 0)
if hasattr(mod, d['@class']):
trans = getattr(mod, d['@class'])
return trans()
raise ValueError("Invalid Comparator dict")
def as_dict(self):
return {"version": __version__, "@module": self.__class__.__module__,
"@class": self.__class__.__name__}
class SpeciesComparator(AbstractComparator):
"""
A Comparator that matches species exactly. The default used in
StructureMatcher.
"""
def are_equal(self, sp1, sp2):
"""
True if species are exactly the same, i.e., Fe2+ == Fe2+ but not Fe3+.
Args:
sp1: First species. A dict of {specie/element: amt} as per the
definition in Site and PeriodicSite.
sp2: Second species. A dict of {specie/element: amt} as per the
definition in Site and PeriodicSite.
Returns:
Boolean indicating whether species are equal.
"""
return sp1 == sp2
def get_hash(self, composition):
"""
Returns: Fractional composition
"""
return composition.fractional_composition
class SpinComparator(AbstractComparator):
"""
A Comparator that matches magnetic structures to their inverse spins.
This comparator is primarily used to filter magnetically ordered
structures with opposite spins, which are equivalent.
"""
def are_equal(self, sp1, sp2):
"""
True if species are exactly the same, i.e., Fe2+ == Fe2+ but not
        Fe3+, and the spins are reversed, i.e., spin up maps to spin down,
and vice versa.
Args:
sp1: First species. A dict of {specie/element: amt} as per the
definition in Site and PeriodicSite.
sp2: Second species. A dict of {specie/element: amt} as per the
definition in Site and PeriodicSite.
Returns:
Boolean indicating whether species are equal.
"""
for s1 in sp1.keys():
spin1 = getattr(s1, "spin", 0)
oxi1 = getattr(s1, "oxi_state", 0)
for s2 in sp2.keys():
spin2 = getattr(s2, "spin", 0)
oxi2 = getattr(s2, "oxi_state", 0)
if (s1.symbol == s2.symbol and oxi1 == oxi2 and
spin2 == -spin1):
break
else:
return False
return True
def get_hash(self, composition):
"""
Returns: Fractional composition
"""
return composition.fractional_composition
class ElementComparator(AbstractComparator):
"""
A Comparator that matches elements. i.e. oxidation states are
ignored.
"""
def are_equal(self, sp1, sp2):
"""
True if element:amounts are exactly the same, i.e.,
oxidation state is not considered.
Args:
sp1: First species. A dict of {specie/element: amt} as per the
definition in Site and PeriodicSite.
sp2: Second species. A dict of {specie/element: amt} as per the
definition in Site and PeriodicSite.
Returns:
Boolean indicating whether species are the same based on element
and amounts.
"""
comp1 = Composition(sp1)
comp2 = Composition(sp2)
return comp1.get_el_amt_dict() == comp2.get_el_amt_dict()
def get_hash(self, composition):
"""
Returns: Fractional element composition
"""
return composition.element_composition.fractional_composition
class FrameworkComparator(AbstractComparator):
"""
A Comparator that matches sites, regardless of species.
"""
def are_equal(self, sp1, sp2):
"""
True if there are atoms on both sites.
Args:
sp1: First species. A dict of {specie/element: amt} as per the
definition in Site and PeriodicSite.
sp2: Second species. A dict of {specie/element: amt} as per the
definition in Site and PeriodicSite.
Returns:
True always
"""
return True
def get_hash(self, composition):
"""
No hash possible
"""
return 1
class OrderDisorderElementComparator(AbstractComparator):
"""
A Comparator that matches sites, given some overlap in the element
composition
"""
def are_equal(self, sp1, sp2):
"""
True if there is some overlap in composition between the species
Args:
sp1: First species. A dict of {specie/element: amt} as per the
definition in Site and PeriodicSite.
sp2: Second species. A dict of {specie/element: amt} as per the
definition in Site and PeriodicSite.
Returns:
            True if there is any element overlap between the two species, False otherwise.
"""
set1 = set(sp1.element_composition.keys())
set2 = set(sp2.element_composition.keys())
if set1.intersection(set2):
return True
return False
def get_hash(self, composition):
""""
No hash possible
"""
return 1
class OccupancyComparator(AbstractComparator):
"""
A Comparator that matches occupancies on sites,
irrespective of the species of those sites.
"""
def are_equal(self, sp1, sp2):
"""
Args:
sp1: First species. A dict of {specie/element: amt} as per the
definition in Site and PeriodicSite.
sp2: Second species. A dict of {specie/element: amt} as per the
definition in Site and PeriodicSite.
Returns:
True if sets of occupancies (amt) are equal on both sites.
"""
set1 = set(sp1.element_composition.values())
set2 = set(sp2.element_composition.values())
if set1 == set2:
return True
else:
return False
def get_hash(self, composition):
# Difficult to define sensible hash
return 1
class StructureMatcher(MSONable):
"""
Class to match structures by similarity.
Algorithm:
1. Given two structures: s1 and s2
2. Optional: Reduce to primitive cells.
3. If the number of sites do not match, return False
4. Reduce to s1 and s2 to Niggli Cells
5. Optional: Scale s1 and s2 to same volume.
6. Optional: Remove oxidation states associated with sites
7. Find all possible lattice vectors for s2 within shell of ltol.
8. For s1, translate an atom in the smallest set to the origin
9. For s2: find all valid lattices from permutations of the list
of lattice vectors (invalid if: det(Lattice Matrix) < half
volume of original s2 lattice)
10. For each valid lattice:
a. If the lattice angles of are within tolerance of s1,
basis change s2 into new lattice.
b. For each atom in the smallest set of s2:
i. Translate to origin and compare fractional sites in
structure within a fractional tolerance.
ii. If true:
ia. Convert both lattices to cartesian and place
both structures on an average lattice
ib. Compute and return the average and max rms
displacement between the two structures normalized
by the average free length per atom
if fit function called:
if normalized max rms displacement is less than
stol. Return True
if get_rms_dist function called:
if normalized average rms displacement is less
than the stored rms displacement, store and
continue. (This function will search all possible
lattices for the smallest average rms displacement
between the two structures)
Args:
ltol (float): Fractional length tolerance. Default is 0.2.
stol (float): Site tolerance. Defined as the fraction of the
average free length per atom := ( V / Nsites ) ** (1/3)
Default is 0.3.
angle_tol (float): Angle tolerance in degrees. Default is 5 degrees.
primitive_cell (bool): If true: input structures will be reduced to
primitive cells prior to matching. Default to True.
scale (bool): Input structures are scaled to equivalent volume if
true; For exact matching, set to False.
attempt_supercell (bool): If set to True and number of sites in
cells differ after a primitive cell reduction (divisible by an
integer) attempts to generate a supercell transformation of the
smaller cell which is equivalent to the larger structure.
allow_subset (bool): Allow one structure to match to the subset of
another structure. Eg. Matching of an ordered structure onto a
disordered one, or matching a delithiated to a lithiated
structure. This option cannot be combined with
attempt_supercell, or with structure grouping.
comparator (Comparator): A comparator object implementing an equals
            method that declares equivalency of sites. Default is
SpeciesComparator, which implies rigid species
mapping, i.e., Fe2+ only matches Fe2+ and not Fe3+.
Other comparators are provided, e.g., ElementComparator which
matches only the elements and not the species.
The reason why a comparator object is used instead of
supplying a comparison function is that it is not possible to
pickle a function, which makes it otherwise difficult to use
StructureMatcher with Python's multiprocessing.
supercell_size (str): Method to use for determining the size of a
supercell (if applicable). Possible values are num_sites,
num_atoms, volume, or an element present in both structures.
ignored_species (list): A list of ions to be ignored in matching. Useful
for matching structures that have similar frameworks except for
certain ions, e.g., Li-ion intercalation frameworks. This is more
useful than allow_subset because it allows better control over
what species are ignored in the matching.
"""
def __init__(self, ltol=0.2, stol=0.3, angle_tol=5, primitive_cell=True,
scale=True, attempt_supercell=False, allow_subset=False,
comparator=SpeciesComparator(), supercell_size='num_sites',
ignored_species=None):
self.ltol = ltol
self.stol = stol
self.angle_tol = angle_tol
self._comparator = comparator
self._primitive_cell = primitive_cell
self._scale = scale
self._supercell = attempt_supercell
self._supercell_size = supercell_size
self._subset = allow_subset
self._ignored_species = [] if ignored_species is None else \
ignored_species[:]
def _get_supercell_size(self, s1, s2):
"""
Returns the supercell size, and whether the supercell should
be applied to s1. If fu == 1, s1_supercell is returned as
true, to avoid ambiguity.
"""
if self._supercell_size == 'num_sites':
fu = s2.num_sites / s1.num_sites
elif self._supercell_size == 'num_atoms':
fu = s2.composition.num_atoms / s1.composition.num_atoms
elif self._supercell_size == 'volume':
fu = s2.volume / s1.volume
else:
try:
el = get_el_sp(self._supercell_size)
fu = s2.composition[el] / s1.composition[el]
except:
raise ValueError('invalid argument for supercell_size')
if fu < 2/3:
return int(round(1/fu)), False
else:
return int(round(fu)), True
def _get_lattices(self, target_lattice, s, supercell_size=1):
"""
Yields lattices for s with lengths and angles close to the
lattice of target_s. If supercell_size is specified, the
returned lattice will have that number of primitive cells
in it
Args:
s, target_s: Structure objects
"""
lattices = s.lattice.find_all_mappings(
target_lattice, ltol=self.ltol, atol=self.angle_tol,
skip_rotation_matrix=True)
for l, _, scale_m in lattices:
if abs(abs(np.linalg.det(scale_m)) - supercell_size) < 0.5:
yield l, scale_m
def _get_supercells(self, struct1, struct2, fu, s1_supercell):
"""
Computes all supercells of one structure close to the lattice of the
other
if s1_supercell == True, it makes the supercells of struct1, otherwise
it makes them of s2
        yields: s1, s2, average_lattice, supercell_matrix
"""
def av_lat(l1, l2):
params = (np.array(l1.lengths_and_angles) +
np.array(l2.lengths_and_angles)) / 2
return Lattice.from_lengths_and_angles(*params)
def sc_generator(s1, s2):
s2_fc = np.array(s2.frac_coords)
if fu == 1:
cc = np.array(s1.cart_coords)
for l, sc_m in self._get_lattices(s2.lattice, s1, fu):
fc = l.get_fractional_coords(cc)
fc -= np.floor(fc)
yield fc, s2_fc, av_lat(l, s2.lattice), sc_m
else:
fc_init = np.array(s1.frac_coords)
for l, sc_m in self._get_lattices(s2.lattice, s1, fu):
fc = np.dot(fc_init, np.linalg.inv(sc_m))
lp = lattice_points_in_supercell(sc_m)
fc = (fc[:, None, :] + lp[None, :, :]).reshape((-1, 3))
fc -= np.floor(fc)
yield fc, s2_fc, av_lat(l, s2.lattice), sc_m
if s1_supercell:
for x in sc_generator(struct1, struct2):
yield x
else:
for x in sc_generator(struct2, struct1):
# reorder generator output so s1 is still first
yield x[1], x[0], x[2], x[3]
def _cmp_fstruct(self, s1, s2, frac_tol, mask):
"""
        Returns true if a matching exists between s1 and s2
under frac_tol. s2 should be a subset of s1
"""
if len(s2) > len(s1):
raise ValueError("s1 must be larger than s2")
if mask.shape != (len(s2), len(s1)):
raise ValueError("mask has incorrect shape")
return is_coord_subset_pbc(s2, s1, frac_tol, mask)
def _cart_dists(self, s1, s2, avg_lattice, mask, normalization, lll_frac_tol=None):
"""
Finds a matching in cartesian space. Finds an additional
fractional translation vector to minimize RMS distance
Args:
s1, s2: numpy arrays of fractional coordinates. len(s1) >= len(s2)
avg_lattice: Lattice on which to calculate distances
mask: numpy array of booleans. mask[i, j] = True indicates
that s2[i] cannot be matched to s1[j]
normalization (float): inverse normalization length
Returns:
Distances from s2 to s1, normalized by (V/Natom) ^ 1/3
Fractional translation vector to apply to s2.
Mapping from s1 to s2, i.e. with numpy slicing, s1[mapping] => s2
"""
if len(s2) > len(s1):
raise ValueError("s1 must be larger than s2")
if mask.shape != (len(s2), len(s1)):
raise ValueError("mask has incorrect shape")
# vectors are from s2 to s1
vecs, d_2 = pbc_shortest_vectors(avg_lattice, s2, s1, mask,
return_d2=True,
lll_frac_tol=lll_frac_tol)
lin = LinearAssignment(d_2)
s = lin.solution
short_vecs = vecs[np.arange(len(s)), s]
translation = np.average(short_vecs, axis=0)
f_translation = avg_lattice.get_fractional_coords(translation)
new_d2 = np.sum((short_vecs - translation) ** 2, axis=-1)
return new_d2 ** 0.5 * normalization, f_translation, s
def _get_mask(self, struct1, struct2, fu, s1_supercell):
"""
Returns mask for matching struct2 to struct1. If struct1 has sites
a b c, and fu = 2, assumes supercells of struct2 will be ordered
aabbcc (rather than abcabc)
Returns:
mask, struct1 translation indices, struct2 translation index
"""
mask = np.zeros((len(struct2), len(struct1), fu), dtype=np.bool)
inner = []
for sp2, i in itertools.groupby(enumerate(struct2.species_and_occu),
key=lambda x: x[1]):
i = list(i)
inner.append((sp2, slice(i[0][0], i[-1][0]+1)))
for sp1, j in itertools.groupby(enumerate(struct1.species_and_occu),
key=lambda x: x[1]):
j = list(j)
j = slice(j[0][0], j[-1][0]+1)
for sp2, i in inner:
mask[i, j, :] = not self._comparator.are_equal(sp1, sp2)
if s1_supercell:
mask = mask.reshape((len(struct2), -1))
else:
# supercell is of struct2, roll fu axis back to preserve
# correct ordering
mask = np.rollaxis(mask, 2, 1)
mask = mask.reshape((-1, len(struct1)))
# find the best translation indices
i = np.argmax(np.sum(mask, axis=-1))
inds = np.where(np.invert(mask[i]))[0]
if s1_supercell:
# remove the symmetrically equivalent s1 indices
inds = inds[::fu]
return np.array(mask, dtype=np.int_), inds, i
def fit(self, struct1, struct2):
"""
Fit two structures.
Args:
struct1 (Structure): 1st structure
struct2 (Structure): 2nd structure
Returns:
True or False.
"""
struct1, struct2 = self._process_species([struct1, struct2])
if not self._subset and self._comparator.get_hash(struct1.composition) \
!= self._comparator.get_hash(struct2.composition):
return None
struct1, struct2, fu, s1_supercell = self._preprocess(struct1, struct2)
match = self._match(struct1, struct2, fu, s1_supercell,
break_on_match=True)
if match is None:
return False
else:
return match[0] <= self.stol
def get_rms_dist(self, struct1, struct2):
"""
Calculate RMS displacement between two structures
Args:
struct1 (Structure): 1st structure
struct2 (Structure): 2nd structure
Returns:
rms displacement normalized by (Vol / nsites) ** (1/3)
and maximum distance between paired sites. If no matching
lattice is found None is returned.
"""
struct1, struct2 = self._process_species([struct1, struct2])
struct1, struct2, fu, s1_supercell = self._preprocess(struct1, struct2)
match = self._match(struct1, struct2, fu, s1_supercell, use_rms=True,
break_on_match=False)
if match is None:
return None
else:
return match[0], max(match[1])
def _process_species(self, structures):
copied_structures = []
for s in structures:
# We need the copies to be actual Structure to work properly, not
# subclasses. So do type(s) == Structure.
ss = s.copy() if type(s) == Structure else \
Structure.from_sites(s)
if self._ignored_species:
ss.remove_species(self._ignored_species)
copied_structures.append(ss)
return copied_structures
def _preprocess(self, struct1, struct2, niggli=True):
"""
Rescales, finds the reduced structures (primitive and niggli),
and finds fu, the supercell size to make struct1 comparable to
s2
"""
struct1 = struct1.copy()
struct2 = struct2.copy()
if niggli:
struct1 = struct1.get_reduced_structure(reduction_algo="niggli")
struct2 = struct2.get_reduced_structure(reduction_algo="niggli")
# primitive cell transformation
if self._primitive_cell:
struct1 = struct1.get_primitive_structure()
struct2 = struct2.get_primitive_structure()
if self._supercell:
fu, s1_supercell = self._get_supercell_size(struct1, struct2)
else:
fu, s1_supercell = 1, True
mult = fu if s1_supercell else 1/fu
# rescale lattice to same volume
if self._scale:
ratio = (struct2.volume / (struct1.volume * mult)) ** (1 / 6)
nl1 = Lattice(struct1.lattice.matrix * ratio)
struct1.modify_lattice(nl1)
nl2 = Lattice(struct2.lattice.matrix / ratio)
struct2.modify_lattice(nl2)
return struct1, struct2, fu, s1_supercell
def _match(self, struct1, struct2, fu, s1_supercell=True, use_rms=False,
break_on_match=False):
"""
Matches one struct onto the other
"""
ratio = fu if s1_supercell else 1/fu
if len(struct1) * ratio >= len(struct2):
return self._strict_match(
struct1, struct2, fu, s1_supercell=s1_supercell,
break_on_match=break_on_match, use_rms=use_rms)
else:
return self._strict_match(
struct2, struct1, fu, s1_supercell=(not s1_supercell),
break_on_match=break_on_match, use_rms=use_rms)
def _strict_match(self, struct1, struct2, fu, s1_supercell=True,
use_rms=False, break_on_match=False):
"""
Matches struct2 onto struct1 (which should contain all sites in
struct2).
Args:
struct1, struct2 (Structure): structures to be matched
fu (int): size of supercell to create
s1_supercell (bool): whether to create the supercell of
struct1 (vs struct2)
use_rms (bool): whether to minimize the rms of the matching
break_on_match (bool): whether to stop search at first
valid match
"""
if fu < 1:
raise ValueError("fu cannot be less than 1")
mask, s1_t_inds, s2_t_ind = self._get_mask(struct1, struct2,
fu, s1_supercell)
if mask.shape[0] > mask.shape[1]:
raise ValueError('after supercell creation, struct1 must '
'have more sites than struct2')
# check that a valid mapping exists
if (not self._subset) and mask.shape[1] != mask.shape[0]:
return None
if LinearAssignment(mask).min_cost > 0:
return None
best_match = None
# loop over all lattices
for s1fc, s2fc, avg_l, sc_m in \
self._get_supercells(struct1, struct2, fu, s1_supercell):
# compute fractional tolerance
normalization = (len(s1fc) / avg_l.volume) ** (1/3)
inv_abc = np.array(avg_l.reciprocal_lattice.abc)
frac_tol = inv_abc * self.stol / (np.pi * normalization)
# loop over all translations
for s1i in s1_t_inds:
t = s1fc[s1i] - s2fc[s2_t_ind]
t_s2fc = s2fc + t
if self._cmp_fstruct(s1fc, t_s2fc, frac_tol, mask):
inv_lll_abc = np.array(avg_l.get_lll_reduced_lattice().reciprocal_lattice.abc)
lll_frac_tol = inv_lll_abc * self.stol / (np.pi * normalization)
dist, t_adj, mapping = self._cart_dists(
s1fc, t_s2fc, avg_l, mask, normalization, lll_frac_tol)
if use_rms:
val = np.linalg.norm(dist) / len(dist) ** 0.5
else:
val = max(dist)
if best_match is None or val < best_match[0]:
total_t = t + t_adj
total_t -= np.round(total_t)
best_match = val, dist, sc_m, total_t, mapping
if (break_on_match or val < 1e-5) and val < self.stol:
return best_match
if best_match and best_match[0] < self.stol:
return best_match
def group_structures(self, s_list, anonymous=False):
"""
Given a list of structures, use fit to group
them by structural equality.
Args:
s_list ([Structure]): List of structures to be grouped
            anonymous (bool): Whether to use anonymous mode.
Returns:
A list of lists of matched structures
        Assumption: if s1 == s2 but s1 != s3, then s2 and s3 will be put
in different groups without comparison.
"""
if self._subset:
raise ValueError("allow_subset cannot be used with"
" group_structures")
original_s_list = list(s_list)
s_list = self._process_species(s_list)
# Use structure hash to pre-group structures
if anonymous:
c_hash = lambda c: c.anonymized_formula
else:
c_hash = self._comparator.get_hash
s_hash = lambda s: c_hash(s[1].composition)
sorted_s_list = sorted(enumerate(s_list), key=s_hash)
all_groups = []
# For each pre-grouped list of structures, perform actual matching.
for k, g in itertools.groupby(sorted_s_list, key=s_hash):
unmatched = list(g)
while len(unmatched) > 0:
i, refs = unmatched.pop(0)
matches = [i]
if anonymous:
inds = filter(lambda i: self.fit_anonymous(refs,
unmatched[i][1]), list(range(len(unmatched))))
else:
inds = filter(lambda i: self.fit(refs, unmatched[i][1]),
list(range(len(unmatched))))
inds = list(inds)
matches.extend([unmatched[i][0] for i in inds])
unmatched = [unmatched[i] for i in range(len(unmatched))
if i not in inds]
all_groups.append([original_s_list[i] for i in matches])
return all_groups
def as_dict(self):
return {"version": __version__, "@module": self.__class__.__module__,
"@class": self.__class__.__name__,
"comparator": self._comparator.as_dict(),
"stol": self.stol,
"ltol": self.ltol,
"angle_tol": self.angle_tol,
"primitive_cell": self._primitive_cell,
"scale": self._scale}
@classmethod
def from_dict(cls, d):
return StructureMatcher(
ltol=d["ltol"], stol=d["stol"], angle_tol=d["angle_tol"],
primitive_cell=d["primitive_cell"], scale=d["scale"],
comparator=AbstractComparator.from_dict(d["comparator"]))
def _anonymous_match(self, struct1, struct2, fu, s1_supercell=True,
use_rms=False, break_on_match=False, single_match=False):
"""
Tries all permutations of matching struct1 to struct2.
Args:
struct1, struct2 (Structure): Preprocessed input structures
Returns:
List of (mapping, match)
"""
if not isinstance(self._comparator, SpeciesComparator):
raise ValueError('Anonymous fitting currently requires SpeciesComparator')
# check that species lists are comparable
sp1 = struct1.composition.elements
sp2 = struct2.composition.elements
if len(sp1) != len(sp2):
return None
ratio = fu if s1_supercell else 1/fu
swapped = len(struct1) * ratio < len(struct2)
s1_comp = struct1.composition
s2_comp = struct2.composition
matches = []
for perm in itertools.permutations(sp2):
sp_mapping = dict(zip(sp1, perm))
# do quick check that compositions are compatible
mapped_comp = Composition({sp_mapping[k]: v
for k, v in s1_comp.items()})
if (not self._subset) and (
self._comparator.get_hash(mapped_comp) !=
self._comparator.get_hash(s2_comp)):
continue
mapped_struct = struct1.copy()
mapped_struct.replace_species(sp_mapping)
if swapped:
m = self._strict_match(struct2, mapped_struct, fu,
(not s1_supercell), use_rms,
break_on_match)
else:
m = self._strict_match(mapped_struct, struct2, fu, s1_supercell,
use_rms, break_on_match)
if m:
matches.append((sp_mapping, m))
if single_match:
break
return matches
def get_rms_anonymous(self, struct1, struct2):
"""
Performs an anonymous fitting, which allows distinct species in one
structure to map to another. E.g., to compare if the Li2O and Na2O
structures are similar.
Args:
struct1 (Structure): 1st structure
struct2 (Structure): 2nd structure
Returns:
(min_rms, min_mapping)
min_rms is the minimum rms distance, and min_mapping is the
corresponding minimal species mapping that would map
struct1 to struct2. (None, None) is returned if the minimax_rms
exceeds the threshold.
"""
struct1, struct2 = self._process_species([struct1, struct2])
struct1, struct2, fu, s1_supercell = self._preprocess(struct1, struct2)
matches = self._anonymous_match(struct1, struct2, fu, s1_supercell,
use_rms=True, break_on_match=False)
if matches:
best = sorted(matches, key=lambda x: x[1][0])[0]
return best[1][0], best[0]
else:
return None, None
def get_best_electronegativity_anonymous_mapping(self, struct1, struct2):
"""
Performs an anonymous fitting, which allows distinct species in one
structure to map to another. E.g., to compare if the Li2O and Na2O
structures are similar. If multiple substitutions are within tolerance
this will return the one which minimizes the difference in
        electronegativity between the matched species.
Args:
struct1 (Structure): 1st structure
struct2 (Structure): 2nd structure
Returns:
min_mapping (Dict): Mapping of struct1 species to struct2 species
"""
struct1, struct2 = self._process_species([struct1, struct2])
struct1, struct2, fu, s1_supercell = self._preprocess(struct1, struct2)
matches = self._anonymous_match(struct1, struct2, fu, s1_supercell,
use_rms=True, break_on_match=True)
if matches:
min_X_diff = np.inf
for m in matches:
X_diff = 0
for k, v in m[0].items():
X_diff += struct1.composition[k] * (k.X - v.X) ** 2
if X_diff < min_X_diff:
min_X_diff = X_diff
best = m[0]
return best
def get_all_anonymous_mappings(self, struct1, struct2, niggli=True,
include_dist=False):
"""
Performs an anonymous fitting, which allows distinct species in one
structure to map to another. Returns a dictionary of species
substitutions that are within tolerance
Args:
struct1 (Structure): 1st structure
struct2 (Structure): 2nd structure
niggli (bool): Find niggli cell in preprocessing
include_dist (bool): Return the maximin distance with each mapping
Returns:
list of species mappings that map struct1 to struct2.
"""
struct1, struct2 = self._process_species([struct1, struct2])
struct1, struct2, fu, s1_supercell = self._preprocess(struct1, struct2,
niggli)
matches = self._anonymous_match(struct1, struct2, fu, s1_supercell,
break_on_match=not include_dist)
if matches:
if include_dist:
return [(m[0], m[1][0]) for m in matches]
else:
return [m[0] for m in matches]
def fit_anonymous(self, struct1, struct2, niggli=True):
"""
Performs an anonymous fitting, which allows distinct species in one
structure to map to another. E.g., to compare if the Li2O and Na2O
structures are similar.
Args:
struct1 (Structure): 1st structure
struct2 (Structure): 2nd structure
Returns:
            True/False: Whether a species mapping can map struct1 to struct2
"""
struct1, struct2 = self._process_species([struct1, struct2])
struct1, struct2, fu, s1_supercell = self._preprocess(struct1, struct2,
niggli)
matches = self._anonymous_match(struct1, struct2, fu, s1_supercell,
break_on_match=True, single_match=True)
if matches:
return True
else:
return False
def get_supercell_matrix(self, supercell, struct):
"""
Returns the matrix for transforming struct to supercell. This
can be used for very distorted 'supercells' where the primitive cell
is impossible to find
"""
if self._primitive_cell:
raise ValueError("get_supercell_matrix cannot be used with the "
"primitive cell option")
struct, supercell, fu, s1_supercell = self._preprocess(struct,
supercell, False)
if not s1_supercell:
raise ValueError("The non-supercell must be put onto the basis"
" of the supercell, not the other way around")
match = self._match(struct, supercell, fu, s1_supercell, use_rms=True,
break_on_match=False)
if match is None:
return None
return match[2]
def get_transformation(self, struct1, struct2):
"""
Returns the supercell transformation, fractional translation vector,
and a mapping to transform struct2 to be similar to struct1.
Args:
struct1 (Structure): Reference structure
struct2 (Structure): Structure to transform.
Returns:
supercell (numpy.ndarray(3, 3)): supercell matrix
vector (numpy.ndarray(3)): fractional translation vector
mapping (list(int or None)):
The first len(struct1) items of the mapping vector are the
indices of struct1's corresponding sites in struct2 (or None
if there is no corresponding site), and the other items are
the remaining site indices of struct2.
"""
if self._primitive_cell:
raise ValueError("get_transformation cannot be used with the "
"primitive cell option")
struct1, struct2 = self._process_species((struct1, struct2))
s1, s2, fu, s1_supercell = self._preprocess(struct1, struct2, False)
ratio = fu if s1_supercell else 1/fu
if s1_supercell and fu > 1:
raise ValueError("Struct1 must be the supercell, "
"not the other way around")
if len(s1) * ratio >= len(s2):
# s1 is superset
match = self._strict_match(s1, s2, fu=fu, s1_supercell=False,
use_rms=True, break_on_match=False)
if match is None:
return None
# invert the mapping, since it needs to be from s1 to s2
mapping = [list(match[4]).index(i) if i in match[4] else None
for i in range(len(s1))]
return match[2], match[3], mapping
else:
# s2 is superset
match = self._strict_match(s2, s1, fu=fu, s1_supercell=True,
use_rms=True, break_on_match=False)
if match is None:
return None
# add sites not included in the mapping
not_included = list(range(len(s2) * fu))
for i in match[4]:
not_included.remove(i)
mapping = list(match[4]) + not_included
return match[2], -match[3], mapping
def get_s2_like_s1(self, struct1, struct2, include_ignored_species=True):
"""
Performs transformations on struct2 to put it in a basis similar to
struct1 (without changing any of the inter-site distances)
Args:
struct1 (Structure): Reference structure
struct2 (Structure): Structure to transform.
include_ignored_species (bool): Defaults to True,
the ignored_species is also transformed to the struct1
lattice orientation, though obviously there is no direct
matching to existing sites.
Returns:
A structure object similar to struct1, obtained by making a
supercell, sorting, and translating struct2.
"""
s1, s2 = self._process_species([struct1, struct2])
trans = self.get_transformation(s1, s2)
if trans is None:
return None
sc, t, mapping = trans
sites = [site for site in s2]
# Append the ignored sites at the end.
sites.extend([site for site in struct2 if site not in s2])
temp = Structure.from_sites(sites)
temp.make_supercell(sc)
temp.translate_sites(list(range(len(temp))), t)
# translate sites to correct unit cell
for i, j in enumerate(mapping[:len(s1)]):
if j is not None:
vec = np.round(struct1[i].frac_coords - temp[j].frac_coords)
temp.translate_sites(j, vec, to_unit_cell=False)
sites = [temp.sites[i] for i in mapping if i is not None]
if include_ignored_species:
start = int(round(len(temp) / len(struct2) * len(s2)))
sites.extend(temp.sites[start:])
return Structure.from_sites(sites)
def get_mapping(self, superset, subset):
"""
Calculate the mapping from superset to subset.
Args:
superset (Structure): Structure containing at least the sites in
subset (within the structure matching tolerance)
subset (Structure): Structure containing some of the sites in
superset (within the structure matching tolerance)
Returns:
numpy array such that superset.sites[mapping] is within matching
tolerance of subset.sites or None if no such mapping is possible
"""
if self._supercell:
raise ValueError("cannot compute mapping to supercell")
if self._primitive_cell:
raise ValueError("cannot compute mapping with primitive cell "
"option")
if len(subset) > len(superset):
raise ValueError("subset is larger than superset")
superset, subset, _, _ = self._preprocess(superset, subset, True)
match = self._strict_match(superset, subset, 1, break_on_match=False)
if match is None or match[0] > self.stol:
return None
return match[4]
| mit | -5,068,807,349,576,970,000 | 38.254743 | 98 | 0.572615 | false |
emkailu/PAT3DEM | bin/p3ctf.py | 1 | 6662 | #!/usr/bin/env python
import os
import sys
import argparse
import subprocess
import glob
from EMAN2 import *
def decim1(num):
# return a list of floats with 1 decimal
num_new = []
for i in num:
num_new += [float("{0:0.1f}".format(float(i)))]
return num_new
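# Illustrative example (not part of the original script):
#   decim1(['1.2345', 3]) -> [1.2, 3.0]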
def ctf_read(ctftxt):
# return a list of values in ctftxt
with open(ctftxt) as ctftxt_r:
lines = ctftxt_r.readlines()
if len(lines) != 6:
print 'Please check {}!'.format(ctftxt)
sys.exit()
p1 = lines[3].replace(';', '').split()
p2 = lines[5].split()
return decim1([p1[6], p1[9], p1[12], p1[15], p2[6], p2[2], p2[1]])
def ctf(image, com_par):
# do ctf
basename = os.path.basename(os.path.splitext(image)[0])
# unify mrcs format to mrc format
if image[-5:] == '.mrcs':
image_link = basename+'.p3.mrc'
try:
os.symlink(image, image_link)
		except OSError:
			pass
	else:
		# plain .mrc input: use it directly as the image link
		image_link = image
# check avg_frame
avg_frame = com_par['movie'].split('\n')[-1]
basename = basename + avg_frame + '.p3'
# generate the com file
out = basename+'_ctffind4'
o_com = out + '.com'
o_log = out + '.log'
with open(o_com, 'w') as o_com_w:
o_com_w.write('{}\n{}\n{}\n{}\n{}\n{}\n{}\n{}\n{}\n{}\n{}\n{}\n{}\n{}\n{}\n{}\n{}\n{}\n'.format('#!/bin/bash', 'ctffind << eof', image_link, com_par['movie'], basename+'.ctf', com_par['apix'], com_par['voltage'], com_par['cs'], com_par['ac'], 512, com_par['minres'], com_par['maxres'], com_par['mindef'], com_par['maxdef'], com_par['step'], 100, 'no', 'eof'))
# run the com
with open(o_log, 'w') as write_log:
subprocess.call(['sh', o_com], stdout=write_log, stderr=subprocess.STDOUT)
def ctf_run(image, com_par):
# control the running of ctf
com_par['nimg'] = EMUtil.get_image_count(image)
basename = os.path.basename(os.path.splitext(image)[0])
ctftxt = '{}{}.p3.txt'.format(basename, com_par['nimg'])
# if ctftxt does not exist, start an initial run
if not os.path.isfile(ctftxt):
com_par['movie'] = 'yes\n{}'.format(com_par['nimg'])
com_par['minres'], com_par['maxres'], com_par['mindef'], com_par['maxdef'], com_par['step'] = 50, 5, 1000, 50000, 500
ctf(image, com_par)
elif os.path.isfile(ctftxt):
p1_minres, p1_maxres, p1_mindef, p1_maxdef, p2_maxres, p2_mindef, p2_maxdef = ctf_read(ctftxt)
# set minres based on defocus
if p2_maxdef > 25000:
p2_minres = 40.0
elif p2_maxdef > 12000:
p2_minres = 30.0
elif p2_maxdef > 7000:
p2_minres = 25.0
else:
p2_minres = 20.0
# for some bad images, you have to increase minres
while p2_minres <= p2_maxres:
p2_minres = p2_maxres + 1
# if parameters converged
if p2_minres == p1_minres and p2_maxres == p1_maxres and p2_mindef > p1_mindef and p2_maxdef < p1_maxdef or com_par['iter'] == 10:
# test avg frames
com_par['minres'], com_par['maxres'], com_par['mindef'], com_par['maxdef'], com_par['step'] = p2_minres, p2_maxres, p2_mindef-1000, p2_maxdef+2000, 100
for i in xrange(1, com_par['nimg']):
com_par['movie'] = 'yes\n{}'.format(i)
ctf(image, com_par)
# find the best avg
d_def = {}
best_ring = {}
for i in xrange(1, com_par['nimg']+1):
result = ctf_read('{}{}.p3.txt'.format(basename, i))
d_def[i] = result[6]-result[5]
best_ring[i] = result[4]
# get an inverse dictionary
inv = {}
for k, v in d_def.iteritems():
inv[v] = inv.get(v, []) + [k]
# we prefer the smallest d_def
find = 0
for defocus in sorted(inv):
# if d_def are the same, we prefer a larger avg
for avg in sorted(inv[defocus], reverse=True):
# if the best_ring didn't get worse, we find it!
if best_ring[avg] <= best_ring[com_par['nimg']]:
os.rename('{}{}.p3.txt'.format(basename, avg), '{}.txt'.format(basename))
os.rename('{}{}.p3.ctf'.format(basename, avg), '{}.ctf'.format(basename))
os.rename('{}{}.p3_ctffind4.log'.format(basename, avg), '{}_ctffind3.log'.format(basename))
find = 1
break
if find == 1:
break
# append as ctffind3 format for relion
with open('{}.txt'.format(basename)) as final_r:
lines = final_r.readlines()
line = lines[-1].split()
df1, df2, ast, cc = line[1], line[2], line[3], line[5]
xmag = com_par['dpsize'] * 10000 / com_par['apix']
with open('{}_ctffind3.log'.format(basename), 'a') as final_w:
final_w.write('CS[mm], HT[kV], AmpCnst, XMAG, DStep[um]\n')
final_w.write('{} {} {} {} {}\n\n'.format(com_par['cs'], com_par['voltage'], com_par['ac'], xmag, com_par['dpsize']))
final_w.write('DFMID1\tDFMID2\tANGAST\tCC\n')
final_w.write('{} {} {} {} Final Values'.format(df1, df2, ast, cc))
# delete intermediate files
for i in glob.glob(basename + '*.p3.*'):
os.unlink(i)
for i in glob.glob(basename + '*.p3_*'):
os.unlink(i)
return 'OK'
# else parameters haven't converged
else:
com_par['movie'] = 'yes\n{}'.format(com_par['nimg'])
com_par['minres'], com_par['maxres'], com_par['mindef'], com_par['maxdef'], com_par['step'] = p2_minres, p2_maxres, p2_mindef-1000, p2_maxdef+2000, 100
ctf(image, com_par)
com_par['iter'] += 1
def main():
progname = os.path.basename(sys.argv[0])
usage = progname + """ [options] <movie.mrcs>
Run ctffind4 until parameters converge.
Needs:
'ctffind' command (v4.0.16, Rohou & Grigorieff, 2015)
"""
args_def = {'apix':1.25, 'voltage':200, 'cs':2, 'ac':0.1, 'dpsize':5}
parser = argparse.ArgumentParser()
parser.add_argument("image", nargs='*', help="specify images to be processed")
parser.add_argument("-a", "--apix", type=float, help="specify apix, by default {}".format(args_def['apix']))
parser.add_argument("-v", "--voltage", type=int, help="specify the voltage (kV), by default {}".format(args_def['voltage']))
parser.add_argument("-c", "--cs", type=float, help="specify spherical abberration, by default {}".format(args_def['cs']))
parser.add_argument("-ac", "--ac", type=float, help="specify amplitude contrast, pure ice 0.04, carbon 0.1, by default {}".format(args_def['ac']))
parser.add_argument("-d", "--dpsize", type=float, help="specify detector pixel size (um), by default {}".format(args_def['dpsize']))
args = parser.parse_args()
if len(sys.argv) == 1:
print "usage: " + usage
print "Please run '" + progname + " -h' for detailed options."
sys.exit(1)
# get default values
for i in args_def:
if args.__dict__[i] == None:
args.__dict__[i] = args_def[i]
# get common parameters
com_par = {'apix':args.apix, 'voltage':args.voltage, 'cs':args.cs, 'ac':args.ac, 'dpsize':args.dpsize}
# loop over all the input images
for image in args.image:
com_par['iter'] = 0
status = ctf_run(image, com_par)
while status != 'OK':
status = ctf_run(image, com_par)
if __name__ == '__main__':
main()
| mit | -4,892,923,597,455,956,000 | 39.13253 | 361 | 0.620684 | false |
sambitgaan/nupic | src/nupic/encoders/coordinate.py | 37 | 6557 | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2014, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
import hashlib
import itertools
import numpy
from nupic.bindings.math import Random
from nupic.encoders.base import Encoder
class CoordinateEncoder(Encoder):
"""
Given a coordinate in an N-dimensional space, and a radius around
that coordinate, the Coordinate Encoder returns an SDR representation
of that position.
The Coordinate Encoder uses an N-dimensional integer coordinate space.
For example, a valid coordinate in this space is (150, -49, 58), whereas
an invalid coordinate would be (55.4, -5, 85.8475).
It uses the following algorithm:
1. Find all the coordinates around the input coordinate, within the
specified radius.
2. For each coordinate, use a uniform hash function to
deterministically map it to a real number between 0 and 1. This is the
"order" of the coordinate.
3. Of these coordinates, pick the top W by order, where W is the
number of active bits desired in the SDR.
4. For each of these W coordinates, use a uniform hash function to
deterministically map it to one of the bits in the SDR. Make this bit active.
5. This results in a final SDR with exactly W bits active
(barring chance hash collisions).
"""
def __init__(self,
w=21,
n=1000,
name=None,
verbosity=0):
"""
See `nupic.encoders.base.Encoder` for more information.
@param name An optional string which will become part of the description
"""
# Validate inputs
if (w <= 0) or (w % 2 == 0):
raise ValueError("w must be an odd positive integer")
if (n <= 6 * w) or (not isinstance(n, int)):
raise ValueError("n must be an int strictly greater than 6*w. For "
"good results we recommend n be strictly greater "
"than 11*w")
self.w = w
self.n = n
self.verbosity = verbosity
self.encoders = None
if name is None:
name = "[%s:%s]" % (self.n, self.w)
self.name = name
def getWidth(self):
"""See `nupic.encoders.base.Encoder` for more information."""
return self.n
def getDescription(self):
"""See `nupic.encoders.base.Encoder` for more information."""
return [('coordinate', 0), ('radius', 1)]
def getScalars(self, inputData):
"""See `nupic.encoders.base.Encoder` for more information."""
return numpy.array([0]*len(inputData))
def encodeIntoArray(self, inputData, output):
"""
See `nupic.encoders.base.Encoder` for more information.
@param inputData (tuple) Contains coordinate (numpy.array)
and radius (float)
@param output (numpy.array) Stores encoded SDR in this numpy array
"""
(coordinate, radius) = inputData
neighbors = self._neighbors(coordinate, radius)
winners = self._topWCoordinates(neighbors, self.w)
bitFn = lambda coordinate: self._bitForCoordinate(coordinate, self.n)
indices = numpy.array([bitFn(w) for w in winners])
output[:] = 0
output[indices] = 1
@staticmethod
def _neighbors(coordinate, radius):
"""
Returns coordinates around given coordinate, within given radius.
Includes given coordinate.
@param coordinate (numpy.array) Coordinate whose neighbors to find
@param radius (float) Radius around `coordinate`
@return (numpy.array) List of coordinates
"""
ranges = [range(n-radius, n+radius+1) for n in coordinate.tolist()]
return numpy.array(list(itertools.product(*ranges)))
@classmethod
def _topWCoordinates(cls, coordinates, w):
"""
Returns the top W coordinates by order.
@param coordinates (numpy.array) A 2D numpy array, where each element
is a coordinate
@param w (int) Number of top coordinates to return
@return (numpy.array) A subset of `coordinates`, containing only the
top ones by order
"""
orders = numpy.array([cls._orderForCoordinate(c)
for c in coordinates.tolist()])
indices = numpy.argsort(orders)[-w:]
return coordinates[indices]
@staticmethod
def _hashCoordinate(coordinate):
"""Hash a coordinate to a 64 bit integer."""
coordinateStr = ",".join(str(v) for v in coordinate)
# Compute the hash and convert to 64 bit int.
hash = int(int(hashlib.md5(coordinateStr).hexdigest(), 16) % (2 ** 64))
return hash
@classmethod
def _orderForCoordinate(cls, coordinate):
"""
Returns the order for a coordinate.
@param coordinate (numpy.array) Coordinate
@return (float) A value in the interval [0, 1), representing the
order of the coordinate
"""
seed = cls._hashCoordinate(coordinate)
rng = Random(seed)
return rng.getReal64()
@classmethod
def _bitForCoordinate(cls, coordinate, n):
"""
Maps the coordinate to a bit in the SDR.
@param coordinate (numpy.array) Coordinate
@param n (int) The number of available bits in the SDR
@return (int) The index to a bit in the SDR
"""
seed = cls._hashCoordinate(coordinate)
rng = Random(seed)
return rng.getUInt32(n)
def dump(self):
print "CoordinateEncoder:"
print " w: %d" % self.w
print " n: %d" % self.n
@classmethod
def read(cls, proto):
encoder = object.__new__(cls)
encoder.w = proto.w
encoder.n = proto.n
encoder.verbosity = proto.verbosity
encoder.name = proto.name
return encoder
def write(self, proto):
proto.w = self.w
proto.n = self.n
proto.verbosity = self.verbosity
proto.name = self.name
| agpl-3.0 | 3,658,378,859,817,901,600 | 30.524038 | 79 | 0.65167 | false |
sfam/home-assistant | homeassistant/components/media_player/denon.py | 5 | 5059 | """
homeassistant.components.media_player.denon
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Provides an interface to Denon Network Receivers.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/media_player.denon/
"""
import telnetlib
import logging
from homeassistant.components.media_player import (
MediaPlayerDevice, SUPPORT_PAUSE, SUPPORT_VOLUME_SET,
SUPPORT_VOLUME_MUTE, SUPPORT_PREVIOUS_TRACK, SUPPORT_NEXT_TRACK,
SUPPORT_TURN_ON, SUPPORT_TURN_OFF,
DOMAIN)
from homeassistant.const import (
CONF_HOST, STATE_OFF, STATE_ON, STATE_UNKNOWN)
_LOGGER = logging.getLogger(__name__)
SUPPORT_DENON = SUPPORT_PAUSE | SUPPORT_VOLUME_SET | SUPPORT_VOLUME_MUTE | \
SUPPORT_PREVIOUS_TRACK | SUPPORT_NEXT_TRACK | \
SUPPORT_TURN_ON | SUPPORT_TURN_OFF
# pylint: disable=unused-argument
def setup_platform(hass, config, add_devices, discovery_info=None):
""" Sets up the Denon platform. """
if not config.get(CONF_HOST):
_LOGGER.error(
"Missing required configuration items in %s: %s",
DOMAIN,
CONF_HOST)
return False
denon = DenonDevice(
config.get("name", "Music station"),
config.get("host")
)
if denon.update():
add_devices([denon])
return True
else:
return False
class DenonDevice(MediaPlayerDevice):
""" Represents a Denon device. """
# pylint: disable=too-many-public-methods
def __init__(self, name, host):
self._name = name
self._host = host
self._pwstate = "PWSTANDBY"
self._volume = 0
self._muted = False
self._mediasource = ""
@classmethod
def telnet_request(cls, telnet, command):
""" Executes `command` and returns the response. """
telnet.write(command.encode("ASCII") + b"\r")
return telnet.read_until(b"\r", timeout=0.2).decode("ASCII").strip()
def telnet_command(self, command):
""" Establishes a telnet connection and sends `command`. """
telnet = telnetlib.Telnet(self._host)
telnet.write(command.encode("ASCII") + b"\r")
telnet.read_very_eager() # skip response
telnet.close()
def update(self):
try:
telnet = telnetlib.Telnet(self._host)
except ConnectionRefusedError:
return False
self._pwstate = self.telnet_request(telnet, "PW?")
# PW? sends also SISTATUS, which is not interesting
telnet.read_until(b"\r", timeout=0.2)
volume_str = self.telnet_request(telnet, "MV?")[len("MV"):]
self._volume = int(volume_str) / 60
self._muted = (self.telnet_request(telnet, "MU?") == "MUON")
self._mediasource = self.telnet_request(telnet, "SI?")[len("SI"):]
telnet.close()
return True
@property
def name(self):
""" Returns the name of the device. """
return self._name
@property
def state(self):
""" Returns the state of the device. """
if self._pwstate == "PWSTANDBY":
return STATE_OFF
if self._pwstate == "PWON":
return STATE_ON
return STATE_UNKNOWN
@property
def volume_level(self):
""" Volume level of the media player (0..1). """
return self._volume
@property
def is_volume_muted(self):
""" Boolean if volume is currently muted. """
return self._muted
@property
def media_title(self):
""" Current media source. """
return self._mediasource
@property
def supported_media_commands(self):
""" Flags of media commands that are supported. """
return SUPPORT_DENON
def turn_off(self):
""" turn_off media player. """
self.telnet_command("PWSTANDBY")
def volume_up(self):
""" volume_up media player. """
self.telnet_command("MVUP")
def volume_down(self):
""" volume_down media player. """
self.telnet_command("MVDOWN")
def set_volume_level(self, volume):
""" set volume level, range 0..1. """
# 60dB max
self.telnet_command("MV" + str(round(volume * 60)).zfill(2))
def mute_volume(self, mute):
""" mute (true) or unmute (false) media player. """
self.telnet_command("MU" + ("ON" if mute else "OFF"))
def media_play_pause(self):
""" media_play_pause media player. """
raise NotImplementedError()
def media_play(self):
""" media_play media player. """
self.telnet_command("NS9A")
def media_pause(self):
""" media_pause media player. """
self.telnet_command("NS9B")
def media_next_track(self):
""" Send next track command. """
self.telnet_command("NS9D")
def media_previous_track(self):
self.telnet_command("NS9E")
def media_seek(self, position):
raise NotImplementedError()
def turn_on(self):
""" turn the media player on. """
self.telnet_command("PWON")
| mit | 3,869,476,987,199,589,000 | 28.412791 | 76 | 0.596956 | false |
philipsd6/jrnl | jrnl/plugins/template_exporter.py | 3 | 1497 | #!/usr/bin/env python
# encoding: utf-8
from __future__ import absolute_import, unicode_literals
from .text_exporter import TextExporter
from .template import Template
import os
from glob import glob
class GenericTemplateExporter(TextExporter):
"""This Exporter can convert entries and journals into text files."""
@classmethod
def export_entry(cls, entry):
"""Returns a unicode representation of a single entry."""
vars = {
'entry': entry,
'tags': entry.tags
}
return cls.template.render_block("entry", **vars)
@classmethod
def export_journal(cls, journal):
"""Returns a unicode representation of an entire journal."""
vars = {
'journal': journal,
'entries': journal.entries,
'tags': journal.tags
}
return cls.template.render_block("journal", **vars)
def __exporter_from_file(template_file):
"""Create a template class from a file"""
name = os.path.basename(template_file).replace(".template", "")
template = Template.from_file(template_file)
return type(str("{}Exporter".format(name.title())), (GenericTemplateExporter, ), {
"names": [name],
"extension": template.extension,
"template": template
})
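# Illustrative sketch, assuming a hypothetical file "jrnl/templates/markdown.template":
# __exporter_from_file builds a class roughly equivalent to
#
#   class MarkdownExporter(GenericTemplateExporter):
#       names = ["markdown"]
#       extension = template.extension
#       template = template
#
# so every *.template file discovered below becomes its own exporter class.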
__all__ = []
# Factory pattern to create Exporter classes for all available templates
for template_file in glob("jrnl/templates/*.template"):
__all__.append(__exporter_from_file(template_file))
| mit | 8,822,666,731,958,224,000 | 29.55102 | 86 | 0.641951 | false |
jhpyle/docassemble | docassemble_base/docassemble/base/mako/ext/pygmentplugin.py | 1 | 4547 | # ext/pygmentplugin.py
# Copyright (C) 2006-2015 the Mako authors and contributors <see AUTHORS file>
#
# This module is part of Mako and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
from pygments.lexers.web import \
HtmlLexer, XmlLexer, JavascriptLexer, CssLexer
from pygments.lexers.agile import PythonLexer, Python3Lexer
from pygments.lexer import DelegatingLexer, RegexLexer, bygroups, \
include, using
from pygments.token import \
Text, Comment, Operator, Keyword, Name, String, Other
from pygments.formatters.html import HtmlFormatter
from pygments import highlight
from docassemble.base.mako import compat
class MakoLexer(RegexLexer):
name = 'Mako'
aliases = ['mako']
filenames = ['*.mao']
tokens = {
'root': [
(r'(\s*)(\%)(\s*end(?:\w+))(\n|\Z)',
bygroups(Text, Comment.Preproc, Keyword, Other)),
(r'(\s*)(\%(?!%))([^\n]*)(\n|\Z)',
bygroups(Text, Comment.Preproc, using(PythonLexer), Other)),
(r'(\s*)(##[^\n]*)(\n|\Z)',
bygroups(Text, Comment.Preproc, Other)),
(r'''(?s)<%doc>.*?</%doc>''', Comment.Preproc),
(r'(<%)([\w\.\:]+)',
bygroups(Comment.Preproc, Name.Builtin), 'tag'),
(r'(</%)([\w\.\:]+)(>)',
bygroups(Comment.Preproc, Name.Builtin, Comment.Preproc)),
(r'<%(?=([\w\.\:]+))', Comment.Preproc, 'ondeftags'),
(r'(<%(?:!?))(.*?)(%>)(?s)',
bygroups(Comment.Preproc, using(PythonLexer), Comment.Preproc)),
(r'(\$\{)(.*?)(\})',
bygroups(Comment.Preproc, using(PythonLexer), Comment.Preproc)),
(r'''(?sx)
(.+?) # anything, followed by:
(?:
(?<=\n)(?=%(?!%)|\#\#) | # an eval or comment line
(?=\#\*) | # multiline comment
(?=</?%) | # a python block
# call start or end
(?=\$\{) | # a substitution
(?<=\n)(?=\s*%) |
# - don't consume
(\\\n) | # an escaped newline
\Z # end of string
)
''', bygroups(Other, Operator)),
(r'\s+', Text),
],
'ondeftags': [
(r'<%', Comment.Preproc),
(r'(?<=<%)(include|inherit|namespace|page)', Name.Builtin),
include('tag'),
],
'tag': [
(r'((?:\w+)\s*=)\s*(".*?")',
bygroups(Name.Attribute, String)),
(r'/?\s*>', Comment.Preproc, '#pop'),
(r'\s+', Text),
],
'attr': [
('".*?"', String, '#pop'),
("'.*?'", String, '#pop'),
(r'[^\s>]+', String, '#pop'),
],
}
class MakoHtmlLexer(DelegatingLexer):
name = 'HTML+Mako'
aliases = ['html+mako']
def __init__(self, **options):
super(MakoHtmlLexer, self).__init__(HtmlLexer, MakoLexer,
**options)
class MakoXmlLexer(DelegatingLexer):
name = 'XML+Mako'
aliases = ['xml+mako']
def __init__(self, **options):
super(MakoXmlLexer, self).__init__(XmlLexer, MakoLexer,
**options)
class MakoJavascriptLexer(DelegatingLexer):
name = 'JavaScript+Mako'
aliases = ['js+mako', 'javascript+mako']
def __init__(self, **options):
super(MakoJavascriptLexer, self).__init__(JavascriptLexer,
MakoLexer, **options)
class MakoCssLexer(DelegatingLexer):
name = 'CSS+Mako'
aliases = ['css+mako']
def __init__(self, **options):
super(MakoCssLexer, self).__init__(CssLexer, MakoLexer,
**options)
pygments_html_formatter = HtmlFormatter(cssclass='syntax-highlighted',
linenos=True)
def syntax_highlight(filename='', language=None):
mako_lexer = MakoLexer()
if compat.py3k:
python_lexer = Python3Lexer()
else:
python_lexer = PythonLexer()
if filename.startswith('memory:') or language == 'mako':
return lambda string: highlight(string, mako_lexer,
pygments_html_formatter)
return lambda string: highlight(string, python_lexer,
pygments_html_formatter)
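# Illustrative usage sketch (the filename is hypothetical): syntax_highlight()
# returns a callable, so highlighting a source string is a two-step call:
#
#   highlighter = syntax_highlight('interview.mako', language='mako')
#   html = highlighter(template_source)   # wrapped in the 'syntax-highlighted' CSS class
#
# Filenames starting with 'memory:' or language='mako' use the Mako lexer;
# everything else falls back to the Python lexer.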
| mit | -4,397,171,927,666,605,000 | 34.80315 | 78 | 0.483616 | false |
whummer/moto | moto/awslambda/models.py | 1 | 25476 | from __future__ import unicode_literals
import base64
from collections import defaultdict
import copy
import datetime
import docker
import docker.errors
import hashlib
import io
import logging
import os
import json
import re
import zipfile
import uuid
import functools
import tarfile
import calendar
import threading
import traceback
import weakref
import requests.adapters
import boto.awslambda
from moto.core import BaseBackend, BaseModel
from moto.core.exceptions import RESTError
from moto.core.utils import unix_time_millis
from moto.s3.models import s3_backend
from moto.logs.models import logs_backends
from moto.s3.exceptions import MissingBucket, MissingKey
from moto import settings
from .utils import make_function_arn, make_function_ver_arn
logger = logging.getLogger(__name__)
ACCOUNT_ID = '123456789012'
try:
from tempfile import TemporaryDirectory
except ImportError:
from backports.tempfile import TemporaryDirectory
_stderr_regex = re.compile(r'START|END|REPORT RequestId: .*')
_orig_adapter_send = requests.adapters.HTTPAdapter.send
docker_3 = docker.__version__[0] >= '3'
def zip2tar(zip_bytes):
with TemporaryDirectory() as td:
tarname = os.path.join(td, 'data.tar')
timeshift = int((datetime.datetime.now() -
datetime.datetime.utcnow()).total_seconds())
with zipfile.ZipFile(io.BytesIO(zip_bytes), 'r') as zipf, \
tarfile.TarFile(tarname, 'w') as tarf:
for zipinfo in zipf.infolist():
if zipinfo.filename[-1] == '/': # is_dir() is py3.6+
continue
tarinfo = tarfile.TarInfo(name=zipinfo.filename)
tarinfo.size = zipinfo.file_size
tarinfo.mtime = calendar.timegm(zipinfo.date_time) - timeshift
infile = zipf.open(zipinfo.filename)
tarf.addfile(tarinfo, infile)
with open(tarname, 'rb') as f:
tar_data = f.read()
return tar_data
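# Illustrative sketch: zip2tar is used further down to copy a Lambda deployment
# package into a Docker volume, because put_archive() expects tar bytes, e.g.
#
#   tar_bytes = zip2tar(zip_bytes)
#   container.put_archive('/tmp/data', tar_bytes)
#
# The timeshift keeps the tar entries' mtimes consistent with local time.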
class _VolumeRefCount:
__slots__ = "refcount", "volume"
def __init__(self, refcount, volume):
self.refcount = refcount
self.volume = volume
class _DockerDataVolumeContext:
_data_vol_map = defaultdict(lambda: _VolumeRefCount(0, None)) # {sha256: _VolumeRefCount}
_lock = threading.Lock()
def __init__(self, lambda_func):
self._lambda_func = lambda_func
self._vol_ref = None
@property
def name(self):
return self._vol_ref.volume.name
def __enter__(self):
# See if volume is already known
with self.__class__._lock:
self._vol_ref = self.__class__._data_vol_map[self._lambda_func.code_sha_256]
self._vol_ref.refcount += 1
if self._vol_ref.refcount > 1:
return self
# See if the volume already exists
for vol in self._lambda_func.docker_client.volumes.list():
if vol.name == self._lambda_func.code_sha_256:
self._vol_ref.volume = vol
return self
# It doesn't exist so we need to create it
self._vol_ref.volume = self._lambda_func.docker_client.volumes.create(self._lambda_func.code_sha_256)
if docker_3:
volumes = {self.name: {'bind': '/tmp/data', 'mode': 'rw'}}
else:
volumes = {self.name: '/tmp/data'}
container = self._lambda_func.docker_client.containers.run('alpine', 'sleep 100', volumes=volumes, detach=True)
try:
tar_bytes = zip2tar(self._lambda_func.code_bytes)
container.put_archive('/tmp/data', tar_bytes)
finally:
container.remove(force=True)
return self
def __exit__(self, exc_type, exc_val, exc_tb):
with self.__class__._lock:
self._vol_ref.refcount -= 1
if self._vol_ref.refcount == 0:
try:
self._vol_ref.volume.remove()
except docker.errors.APIError as e:
if e.status_code != 409:
raise
raise # multiple processes trying to use same volume?
class LambdaFunction(BaseModel):
def __init__(self, spec, region, validate_s3=True, version=1):
# required
self.region = region
self.code = spec['Code']
self.function_name = spec['FunctionName']
self.handler = spec['Handler']
self.role = spec['Role']
self.run_time = spec['Runtime']
self.logs_backend = logs_backends[self.region]
self.environment_vars = spec.get('Environment', {}).get('Variables', {})
self.docker_client = docker.from_env()
self.policy = ""
# Unfortunately mocking replaces this method w/o fallback enabled, so we
# need to replace it if we detect it's been mocked
if requests.adapters.HTTPAdapter.send != _orig_adapter_send:
_orig_get_adapter = self.docker_client.api.get_adapter
def replace_adapter_send(*args, **kwargs):
adapter = _orig_get_adapter(*args, **kwargs)
if isinstance(adapter, requests.adapters.HTTPAdapter):
adapter.send = functools.partial(_orig_adapter_send, adapter)
return adapter
self.docker_client.api.get_adapter = replace_adapter_send
# optional
self.description = spec.get('Description', '')
self.memory_size = spec.get('MemorySize', 128)
self.publish = spec.get('Publish', False) # this is ignored currently
self.timeout = spec.get('Timeout', 3)
self.logs_group_name = '/aws/lambda/{}'.format(self.function_name)
self.logs_backend.ensure_log_group(self.logs_group_name, [])
# this isn't finished yet. it needs to find out the VpcId value
self._vpc_config = spec.get(
'VpcConfig', {'SubnetIds': [], 'SecurityGroupIds': []})
# auto-generated
self.version = version
self.last_modified = datetime.datetime.utcnow().strftime(
'%Y-%m-%d %H:%M:%S')
if 'ZipFile' in self.code:
# more hackery to handle unicode/bytes/str in python3 and python2 -
# argh!
try:
to_unzip_code = base64.b64decode(
bytes(self.code['ZipFile'], 'utf-8'))
except Exception:
to_unzip_code = base64.b64decode(self.code['ZipFile'])
self.code_bytes = to_unzip_code
self.code_size = len(to_unzip_code)
self.code_sha_256 = hashlib.sha256(to_unzip_code).hexdigest()
# TODO: we should be putting this in a lambda bucket
self.code['UUID'] = str(uuid.uuid4())
self.code['S3Key'] = '{}-{}'.format(self.function_name, self.code['UUID'])
else:
# validate s3 bucket and key
key = None
try:
# FIXME: does not validate bucket region
key = s3_backend.get_key(
self.code['S3Bucket'], self.code['S3Key'])
except MissingBucket:
if do_validate_s3():
raise ValueError(
"InvalidParameterValueException",
"Error occurred while GetObject. S3 Error Code: NoSuchBucket. S3 Error Message: The specified bucket does not exist")
except MissingKey:
if do_validate_s3():
raise ValueError(
"InvalidParameterValueException",
"Error occurred while GetObject. S3 Error Code: NoSuchKey. S3 Error Message: The specified key does not exist.")
if key:
self.code_bytes = key.value
self.code_size = key.size
self.code_sha_256 = hashlib.sha256(key.value).hexdigest()
self.function_arn = make_function_arn(self.region, ACCOUNT_ID, self.function_name)
self.tags = dict()
def set_version(self, version):
self.function_arn = make_function_ver_arn(self.region, ACCOUNT_ID, self.function_name, version)
self.version = version
self.last_modified = datetime.datetime.utcnow().strftime('%Y-%m-%d %H:%M:%S')
@property
def vpc_config(self):
config = self._vpc_config.copy()
if config['SecurityGroupIds']:
config.update({"VpcId": "vpc-123abc"})
return config
@property
def physical_resource_id(self):
return self.function_name
def __repr__(self):
return json.dumps(self.get_configuration())
def get_configuration(self):
config = {
"CodeSha256": self.code_sha_256,
"CodeSize": self.code_size,
"Description": self.description,
"FunctionArn": self.function_arn,
"FunctionName": self.function_name,
"Handler": self.handler,
"LastModified": self.last_modified,
"MemorySize": self.memory_size,
"Role": self.role,
"Runtime": self.run_time,
"Timeout": self.timeout,
"Version": str(self.version),
"VpcConfig": self.vpc_config,
}
if self.environment_vars:
config['Environment'] = {
'Variables': self.environment_vars
}
return config
def get_code(self):
return {
"Code": {
"Location": "s3://awslambda-{0}-tasks.s3-{0}.amazonaws.com/{1}".format(self.region, self.code['S3Key']),
"RepositoryType": "S3"
},
"Configuration": self.get_configuration(),
}
@staticmethod
def convert(s):
try:
return str(s, encoding='utf-8')
except Exception:
return s
@staticmethod
def is_json(test_str):
try:
response = json.loads(test_str)
except Exception:
response = test_str
return response
def _invoke_lambda(self, code, event=None, context=None):
# TODO: context not yet implemented
if event is None:
event = dict()
if context is None:
context = {}
try:
# TODO: I believe we can keep the container running and feed events as needed
# also need to hook it up to the other services so it can make kws/s3 etc calls
            # Should get invoke_id / RequestId from invocation
env_vars = {
"AWS_LAMBDA_FUNCTION_TIMEOUT": self.timeout,
"AWS_LAMBDA_FUNCTION_NAME": self.function_name,
"AWS_LAMBDA_FUNCTION_MEMORY_SIZE": self.memory_size,
"AWS_LAMBDA_FUNCTION_VERSION": self.version,
"AWS_REGION": self.region,
}
env_vars.update(self.environment_vars)
container = output = exit_code = None
with _DockerDataVolumeContext(self) as data_vol:
try:
run_kwargs = dict(links={'motoserver': 'motoserver'}) if settings.TEST_SERVER_MODE else {}
container = self.docker_client.containers.run(
"lambci/lambda:{}".format(self.run_time),
[self.handler, json.dumps(event)], remove=False,
mem_limit="{}m".format(self.memory_size),
volumes=["{}:/var/task".format(data_vol.name)], environment=env_vars, detach=True, **run_kwargs)
finally:
if container:
try:
exit_code = container.wait(timeout=300)
except requests.exceptions.ReadTimeout:
exit_code = -1
container.stop()
container.kill()
else:
if docker_3:
exit_code = exit_code['StatusCode']
output = container.logs(stdout=False, stderr=True)
output += container.logs(stdout=True, stderr=False)
container.remove()
output = output.decode('utf-8')
# Send output to "logs" backend
invoke_id = uuid.uuid4().hex
log_stream_name = "{date.year}/{date.month:02d}/{date.day:02d}/[{version}]{invoke_id}".format(
date=datetime.datetime.utcnow(), version=self.version, invoke_id=invoke_id
)
self.logs_backend.create_log_stream(self.logs_group_name, log_stream_name)
log_events = [{'timestamp': unix_time_millis(), "message": line}
for line in output.splitlines()]
self.logs_backend.put_log_events(self.logs_group_name, log_stream_name, log_events, None)
if exit_code != 0:
raise Exception(
'lambda invoke failed output: {}'.format(output))
# strip out RequestId lines
output = os.linesep.join([line for line in self.convert(output).splitlines() if not _stderr_regex.match(line)])
return output, False
except BaseException as e:
traceback.print_exc()
return "error running lambda: {}".format(e), True
def invoke(self, body, request_headers, response_headers):
payload = dict()
if body:
body = json.loads(body)
# Get the invocation type:
res, errored = self._invoke_lambda(code=self.code, event=body)
if request_headers.get("x-amz-invocation-type") == "RequestResponse":
encoded = base64.b64encode(res.encode('utf-8'))
response_headers["x-amz-log-result"] = encoded.decode('utf-8')
payload['result'] = response_headers["x-amz-log-result"]
result = res.encode('utf-8')
else:
result = json.dumps(payload)
if errored:
response_headers['x-amz-function-error'] = "Handled"
return result
@classmethod
def create_from_cloudformation_json(cls, resource_name, cloudformation_json,
region_name):
properties = cloudformation_json['Properties']
# required
spec = {
'Code': properties['Code'],
'FunctionName': resource_name,
'Handler': properties['Handler'],
'Role': properties['Role'],
'Runtime': properties['Runtime'],
}
optional_properties = 'Description MemorySize Publish Timeout VpcConfig Environment'.split()
# NOTE: Not doing `properties.get(k, DEFAULT)` to avoid duplicating the
# default logic
for prop in optional_properties:
if prop in properties:
spec[prop] = properties[prop]
        # when ZipFile is present in CloudFormation, per the official docs,
        # the code is a plaintext snippet of up to 4096 bytes.
        # this converts that plaintext code to a proper base64-encoded ZIP file.
if 'ZipFile' in properties['Code']:
spec['Code']['ZipFile'] = base64.b64encode(
cls._create_zipfile_from_plaintext_code(
spec['Code']['ZipFile']))
backend = lambda_backends[region_name]
fn = backend.create_function(spec)
return fn
def get_cfn_attribute(self, attribute_name):
from moto.cloudformation.exceptions import \
UnformattedGetAttTemplateException
if attribute_name == 'Arn':
return make_function_arn(self.region, ACCOUNT_ID, self.function_name)
raise UnformattedGetAttTemplateException()
@staticmethod
def _create_zipfile_from_plaintext_code(code):
zip_output = io.BytesIO()
zip_file = zipfile.ZipFile(zip_output, 'w', zipfile.ZIP_DEFLATED)
zip_file.writestr('lambda_function.zip', code)
zip_file.close()
zip_output.seek(0)
return zip_output.read()
class EventSourceMapping(BaseModel):
def __init__(self, spec):
# required
self.function_name = spec['FunctionName']
self.event_source_arn = spec['EventSourceArn']
self.starting_position = spec['StartingPosition']
# optional
self.batch_size = spec.get('BatchSize', 100)
self.enabled = spec.get('Enabled', True)
self.starting_position_timestamp = spec.get('StartingPositionTimestamp',
None)
@classmethod
def create_from_cloudformation_json(cls, resource_name, cloudformation_json,
region_name):
properties = cloudformation_json['Properties']
spec = {
'FunctionName': properties['FunctionName'],
'EventSourceArn': properties['EventSourceArn'],
'StartingPosition': properties['StartingPosition']
}
optional_properties = 'BatchSize Enabled StartingPositionTimestamp'.split()
for prop in optional_properties:
if prop in properties:
spec[prop] = properties[prop]
return EventSourceMapping(spec)
class LambdaVersion(BaseModel):
def __init__(self, spec):
self.version = spec['Version']
def __repr__(self):
return str(self.logical_resource_id)
@classmethod
def create_from_cloudformation_json(cls, resource_name, cloudformation_json,
region_name):
properties = cloudformation_json['Properties']
spec = {
'Version': properties.get('Version')
}
return LambdaVersion(spec)
class LambdaStorage(object):
def __init__(self):
        # Format: {'func_name': {'latest': fn, 'versions': [], 'alias': {}}}
self._functions = {}
self._arns = weakref.WeakValueDictionary()
def _get_latest(self, name):
return self._functions[name]['latest']
def _get_version(self, name, version):
index = version - 1
try:
return self._functions[name]['versions'][index]
except IndexError:
return None
def _get_alias(self, name, alias):
return self._functions[name]['alias'].get(alias, None)
def get_function(self, name, qualifier=None):
if name not in self._functions:
return None
if qualifier is None:
return self._get_latest(name)
try:
return self._get_version(name, int(qualifier))
except ValueError:
return self._functions[name]['latest']
def list_versions_by_function(self, name):
if name not in self._functions:
return None
latest = copy.copy(self._functions[name]['latest'])
latest.function_arn += ':$LATEST'
return [latest] + self._functions[name]['versions']
def get_arn(self, arn):
return self._arns.get(arn, None)
def put_function(self, fn):
"""
:param fn: Function
:type fn: LambdaFunction
"""
if fn.function_name in self._functions:
self._functions[fn.function_name]['latest'] = fn
else:
self._functions[fn.function_name] = {
'latest': fn,
'versions': [],
'alias': weakref.WeakValueDictionary()
}
self._arns[fn.function_arn] = fn
def publish_function(self, name):
if name not in self._functions:
return None
if not self._functions[name]['latest']:
return None
new_version = len(self._functions[name]['versions']) + 1
fn = copy.copy(self._functions[name]['latest'])
fn.set_version(new_version)
self._functions[name]['versions'].append(fn)
self._arns[fn.function_arn] = fn
return fn
def del_function(self, name, qualifier=None):
if name in self._functions:
if not qualifier:
                # Something is still referencing this, so delete all ARNs
latest = self._functions[name]['latest'].function_arn
del self._arns[latest]
for fn in self._functions[name]['versions']:
del self._arns[fn.function_arn]
del self._functions[name]
return True
elif qualifier == '$LATEST':
self._functions[name]['latest'] = None
                # If there are no functions left
if not self._functions[name]['versions'] and not self._functions[name]['latest']:
del self._functions[name]
return True
else:
fn = self.get_function(name, qualifier)
if fn:
self._functions[name]['versions'].remove(fn)
                    # If there are no functions left
if not self._functions[name]['versions'] and not self._functions[name]['latest']:
del self._functions[name]
return True
return False
def all(self):
result = []
for function_group in self._functions.values():
if function_group['latest'] is not None:
result.append(function_group['latest'])
result.extend(function_group['versions'])
return result
class LambdaBackend(BaseBackend):
def __init__(self, region_name):
self._lambdas = LambdaStorage()
self.region_name = region_name
def reset(self):
region_name = self.region_name
self.__dict__ = {}
self.__init__(region_name)
def create_function(self, spec):
function_name = spec.get('FunctionName', None)
if function_name is None:
raise RESTError('InvalidParameterValueException', 'Missing FunctionName')
fn = LambdaFunction(spec, self.region_name, version='$LATEST')
self._lambdas.put_function(fn)
if spec.get('Publish'):
ver = self.publish_function(function_name)
fn.version = ver.version
return fn
def publish_function(self, function_name):
return self._lambdas.publish_function(function_name)
def get_function(self, function_name, qualifier=None):
return self._lambdas.get_function(function_name, qualifier)
def list_versions_by_function(self, function_name):
return self._lambdas.list_versions_by_function(function_name)
def get_function_by_arn(self, function_arn):
return self._lambdas.get_arn(function_arn)
def delete_function(self, function_name, qualifier=None):
return self._lambdas.del_function(function_name, qualifier)
def list_functions(self):
return self._lambdas.all()
def send_message(self, function_name, message, subject=None, qualifier=None):
event = {
"Records": [
{
"EventVersion": "1.0",
"EventSubscriptionArn": "arn:aws:sns:EXAMPLE",
"EventSource": "aws:sns",
"Sns": {
"SignatureVersion": "1",
"Timestamp": "1970-01-01T00:00:00.000Z",
"Signature": "EXAMPLE",
"SigningCertUrl": "EXAMPLE",
"MessageId": "95df01b4-ee98-5cb9-9903-4c221d41eb5e",
"Message": message,
"MessageAttributes": {
"Test": {
"Type": "String",
"Value": "TestString"
},
"TestBinary": {
"Type": "Binary",
"Value": "TestBinary"
}
},
"Type": "Notification",
"UnsubscribeUrl": "EXAMPLE",
"TopicArn": "arn:aws:sns:EXAMPLE",
"Subject": subject or "TestInvoke"
}
}
]
}
func = self._lambdas.get_function(function_name, qualifier)
func.invoke(json.dumps(event), {}, {})
def list_tags(self, resource):
return self.get_function_by_arn(resource).tags
def tag_resource(self, resource, tags):
fn = self.get_function_by_arn(resource)
if not fn:
return False
fn.tags.update(tags)
return True
def untag_resource(self, resource, tagKeys):
fn = self.get_function_by_arn(resource)
if fn:
for key in tagKeys:
try:
del fn.tags[key]
except KeyError:
pass
# Don't care
return True
return False
def add_policy(self, function_name, policy):
self.get_function(function_name).policy = policy
def do_validate_s3():
return os.environ.get('VALIDATE_LAMBDA_S3', '') in ['', '1', 'true']
# Handle us forgotten regions, unless Lambda truly only runs out of US and
lambda_backends = {_region.name: LambdaBackend(_region.name)
for _region in boto.awslambda.regions()}
lambda_backends['ap-southeast-2'] = LambdaBackend('ap-southeast-2')
lambda_backends['us-gov-west-1'] = LambdaBackend('us-gov-west-1')
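# Illustrative usage sketch (the handler zip variable is hypothetical):
#
#   backend = lambda_backends['us-east-1']
#   fn = backend.create_function({
#       'FunctionName': 'example', 'Handler': 'lambda_function.lambda_handler',
#       'Role': 'arn:aws:iam::123456789012:role/example', 'Runtime': 'python2.7',
#       'Code': {'ZipFile': base64_encoded_zip},
#   })
#   backend.publish_function('example')   # snapshots $LATEST as version 1
#
# Invocation then runs the handler inside a lambci/lambda Docker container.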
| apache-2.0 | 8,312,100,478,511,132,000 | 34.88169 | 141 | 0.558526 | false |
jdemon519/cfme_tests | utils/units.py | 6 | 3398 | # -*- coding: utf-8 -*-
import math
import re
# TODO: Split the 1000 and 1024 factor out. Now it is not an issue as it is used FOR COMPARISON ONLY
FACTOR = 1024
PREFIXES = ['', 'K', 'M', 'G', 'T', 'P']
FACTORS = {prefix: int(math.pow(FACTOR, i)) for i, prefix in enumerate(PREFIXES)}
UNITS = ['Byte', 'Bytes', 'B', 'b', 'Hz']
EQUAL_UNITS = {
'B': ('Byte', 'Bytes')
}
# Sanity check
for target_unit, units in EQUAL_UNITS.iteritems():
assert target_unit in UNITS
for unit in units:
assert unit in UNITS
REGEXP = re.compile(
r'^\s*(\d+(?:\.\d+)?)\s*({})?({})\s*$'.format('|'.join(PREFIXES), '|'.join(UNITS)))
class Unit(object):
"""This class serves for simple comparison of numbers that have units.
Imagine you pull a text value from the UI. 2 GB. By doing ``Unit.parse('2 GB')`` you get an
instance of :py:class:`Unit`, which is comparable.
You can compare two :py:class:`Unit` instances or you can compare :py:class:`Unit` with
:py:class:`int`, :py:class:`float` or any :py:class:`str` as long as it can go through the
:py:meth:`Unit.parse`.
If you compare :py:class:`Unit` only (or a string that gets subsequently parsed), it also takes
the kind of the unit it is, you cannot compare bytes with hertzes. It then calculates the
absolute value in the base units and that gets compared.
If you compare with a number, it does it like it was the number of the same unit. So eg.
doing::
        Unit.parse('2 GB') == 2 * 1024 * 1024 * 1024  is True
"""
__slots__ = ['number', 'prefix', 'unit_type']
@classmethod
def parse(cls, s):
s = str(s)
match = REGEXP.match(s)
if match is None:
raise ValueError('{} is not a proper value to be parsed!'.format(repr(s)))
number, prefix, unit_type = match.groups()
        # Check if it isn't just another name for another unit.
for target_unit, units in EQUAL_UNITS.iteritems():
if unit_type in units:
unit_type = target_unit
return cls(float(number), prefix, unit_type)
def __init__(self, number, prefix, unit_type):
self.number = float(number)
self.prefix = prefix
self.unit_type = unit_type
@property
def absolute(self):
return self.number * FACTORS[self.prefix]
def _as_same_unit(self, int_or_float):
return type(self)(int_or_float, PREFIXES[0], self.unit_type)
def __cmp__(self, other):
if isinstance(other, basestring):
other = self.parse(other)
elif isinstance(other, (int, float)):
other = self._as_same_unit(other)
elif not isinstance(other, Unit):
raise TypeError('Incomparable types {} and {}'.format(type(self), type(other)))
# other is instance of this class too now
if self.unit_type != other.unit_type:
raise TypeError('Incomparable units {} and {}'.format(self.unit_type, other.unit_type))
return cmp(self.absolute, other.absolute)
def __float__(self):
return self.absolute
def __int__(self):
return int(self.absolute)
def __repr__(self):
return '{}({}, {}, {})'.format(
type(self).__name__, repr(self.number), repr(self.prefix), repr(self.unit_type))
def __str__(self):
return '{} {}{}'.format(self.number, self.prefix, self.unit_type)
| gpl-2.0 | 1,580,423,731,653,379,800 | 34.030928 | 100 | 0.60771 | false |
mozilla/firefox-flicks | vendor-local/lib/python/djcelery/tests/test_backends/test_database.py | 3 | 3025 | from __future__ import absolute_import
from datetime import timedelta
from celery import current_app
from celery import states
from celery.result import AsyncResult
from celery.task import PeriodicTask
from celery.utils import gen_unique_id
from djcelery.backends.database import DatabaseBackend
from djcelery.utils import now
from djcelery.tests.utils import unittest
class SomeClass(object):
def __init__(self, data):
self.data = data
class MyPeriodicTask(PeriodicTask):
name = "c.u.my-periodic-task-244"
run_every = timedelta(seconds=1)
def run(self, **kwargs):
return 42
class TestDatabaseBackend(unittest.TestCase):
def test_backend(self):
b = DatabaseBackend()
tid = gen_unique_id()
self.assertEqual(b.get_status(tid), states.PENDING)
self.assertIsNone(b.get_result(tid))
b.mark_as_done(tid, 42)
self.assertEqual(b.get_status(tid), states.SUCCESS)
self.assertEqual(b.get_result(tid), 42)
tid2 = gen_unique_id()
result = {"foo": "baz", "bar": SomeClass(12345)}
b.mark_as_done(tid2, result)
# is serialized properly.
rindb = b.get_result(tid2)
self.assertEqual(rindb.get("foo"), "baz")
self.assertEqual(rindb.get("bar").data, 12345)
tid3 = gen_unique_id()
try:
raise KeyError("foo")
except KeyError, exception:
pass
b.mark_as_failure(tid3, exception)
self.assertEqual(b.get_status(tid3), states.FAILURE)
self.assertIsInstance(b.get_result(tid3), KeyError)
def test_forget(self):
b = DatabaseBackend()
tid = gen_unique_id()
b.mark_as_done(tid, {"foo": "bar"})
x = AsyncResult(tid)
self.assertEqual(x.result.get("foo"), "bar")
x.forget()
self.assertIsNone(x.result)
def test_group_store(self):
b = DatabaseBackend()
tid = gen_unique_id()
self.assertIsNone(b.restore_group(tid))
result = {"foo": "baz", "bar": SomeClass(12345)}
b.save_group(tid, result)
rindb = b.restore_group(tid)
self.assertIsNotNone(rindb)
self.assertEqual(rindb.get("foo"), "baz")
self.assertEqual(rindb.get("bar").data, 12345)
b.delete_group(tid)
self.assertIsNone(b.restore_group(tid))
def test_cleanup(self):
b = DatabaseBackend()
b.TaskModel._default_manager.all().delete()
ids = [gen_unique_id() for _ in xrange(3)]
for i, res in enumerate((16, 32, 64)):
b.mark_as_done(ids[i], res)
self.assertEqual(b.TaskModel._default_manager.count(), 3)
then = now() - current_app.conf.CELERY_TASK_RESULT_EXPIRES * 2
# Have to avoid save() because it applies the auto_now=True.
b.TaskModel._default_manager.filter(task_id__in=ids[:-1]) \
.update(date_done=then)
b.cleanup()
self.assertEqual(b.TaskModel._default_manager.count(), 1)
| bsd-3-clause | 2,561,283,935,348,965,400 | 29.555556 | 70 | 0.615207 | false |
JBonsink/GSOC-2013 | net_config/back_traf.py | 4 | 1406 | """ parameter required
sim_t
"""
#################################
## Parameter For Normal Case ##
#################################
# sim_t = 8000
start = 0
DEFAULT_PROFILE = ((sim_t,),(1,))
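# Note: `sim_t` is expected to be defined by the caller before this config is
# evaluated (e.g. sim_t = 8000 as in the commented line above); DEFAULT_PROFILE
# then describes a single state lasting the whole simulation.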
# gen_desc = {'TYPE':'harpoon', 'flow_size_mean':'4e5', 'flow_size_var':'100', 'flow_arrival_rate':'0.5'}
gen_desc = {'TYPE':'harpoon', 'flow_size_mean':'4e3', 'flow_size_var':'100', 'flow_arrival_rate':'0.5'}
NORM_DESC = dict(
TYPE = 'NORMAl',
start = '0',
node_para = {'states':[gen_desc]},
profile = DEFAULT_PROFILE,
# there will be traffic from any combination of src_nodes
# and dst_nodes
# src_nodes = [0, 1, 2, 3, 4, 5, 7, 8, 9, 10, 11, 12, 13],
# src_nodes = [0, 1, 2, 3, 4, 5, 7, 8, 10, 11, 12, 13],
# dst_nodes = [0, 1, 2, 3, 4, 5, 7, 8, 10, 11, 12, 13],
src_nodes = [0, 1, 2, 3, 4, 5, 7, 8],
dst_nodes = [0, 1, 2, 3, 4, 5, 7, 8],
)
# ANOMALY_TIME = (1200, 1400)
# ANO_DESC = {'anoType':'TARGET_ONE_SERVER',
ANO_DESC = dict(
# anoType = 'anomaly',
anoType = 'add_mod',
ano_node_seq = 9,
T = (1000, 1300),
gen_desc = gen_desc,
dst_nodes = [1],
# change = {'flow_size_mean':2},
# change = {'flow_arrival_rate':3},
# change = gen_desc,
# change = {'flow_arrival_rate':6},
# srv_id = 1,
)
ANO_LIST = [ANO_DESC]
| gpl-3.0 | -3,107,608,183,109,777,400 | 31.697674 | 105 | 0.470128 | false |
OptiPop/external_chromium_org | tools/perf/measurements/media.py | 26 | 2660 | # Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from metrics import cpu
from metrics import media
from metrics import system_memory
from metrics import power
from telemetry.page import page_test
class Media(page_test.PageTest):
"""The MediaMeasurement class gathers media-related metrics on a page set.
Media metrics recorded are controlled by metrics/media.js. At the end of the
test each metric for every media element in the page are reported.
"""
def __init__(self):
super(Media, self).__init__('RunMediaMetrics')
self._media_metric = None
# Used to add browser power and CPU metrics to results per test.
self._add_browser_metrics = False
self._cpu_metric = None
self._memory_metric = None
self._power_metric = None
def WillStartBrowser(self, platform):
self._power_metric = power.PowerMetric(platform)
def CustomizeBrowserOptions(self, options):
# Needed to run media actions in JS on touch-based devices as on Android.
options.AppendExtraBrowserArgs(
'--disable-gesture-requirement-for-media-playback')
power.PowerMetric.CustomizeBrowserOptions(options)
def DidNavigateToPage(self, page, tab):
"""Override to do operations right after the page is navigated."""
self._media_metric = media.MediaMetric(tab)
self._media_metric.Start(page, tab)
# Reset to false for every page.
self._add_browser_metrics = (page.add_browser_metrics
if hasattr(page, 'add_browser_metrics') else False)
if self._add_browser_metrics:
self._cpu_metric = cpu.CpuMetric(tab.browser)
self._cpu_metric.Start(page, tab)
self._memory_metric = system_memory.SystemMemoryMetric(tab.browser)
self._memory_metric.Start(page, tab)
self._power_metric.Start(page, tab)
def ValidateAndMeasurePage(self, page, tab, results):
"""Measure the page's performance."""
self._media_metric.Stop(page, tab)
trace_name = self._media_metric.AddResults(tab, results)
if self._add_browser_metrics:
self._cpu_metric.Stop(page, tab)
self._memory_metric.Stop(page, tab)
self._power_metric.Stop(page, tab)
self._cpu_metric.AddResults(tab, results, trace_name=trace_name)
exclude_metrics = ['WorkingSetSizePeak', 'SystemCommitCharge', 'VMPeak',
'VM']
self._memory_metric.AddResults(tab, results,
trace_name=trace_name,
exclude_metrics=exclude_metrics)
self._power_metric.AddResults(tab, results)
| bsd-3-clause | -1,825,316,912,215,399,700 | 38.117647 | 79 | 0.688722 | false |
apark263/tensorflow | tensorflow/contrib/timeseries/examples/lstm_test.py | 40 | 1760 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests that the TensorFlow parts of the LSTM example run."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.timeseries.examples import lstm
from tensorflow.python.estimator import estimator_lib
from tensorflow.python.platform import test
class _SeedRunConfig(estimator_lib.RunConfig):
@property
def tf_random_seed(self):
return 3
class LSTMExampleTest(test.TestCase):
def test_periodicity_learned(self):
(observed_times, observed_values,
all_times, predicted_values) = lstm.train_and_predict(
training_steps=2, estimator_config=_SeedRunConfig(),
export_directory=self.get_temp_dir())
self.assertAllEqual([100], observed_times.shape)
self.assertAllEqual([100, 5], observed_values.shape)
self.assertAllEqual([200], all_times.shape)
self.assertAllEqual([200, 5], predicted_values.shape)
# TODO(allenl): Make the model deterministic so you can check something
# substantive.
if __name__ == "__main__":
test.main()
| apache-2.0 | -2,188,256,339,058,003,200 | 34.918367 | 80 | 0.707386 | false |
dvro/scikit-protopy | examples/plot_selection_example.py | 1 | 1639 | import numpy as np
import matplotlib.pyplot as plt
import random
from protopy.selection.enn import ENN
from protopy.selection.cnn import CNN
from protopy.selection.renn import RENN
from protopy.selection.allknn import AllKNN
from protopy.selection.tomek_links import TomekLinks
from protopy.selection.ssma import SSMA
mu1 = [4, 5]
si1 = [[0.75, 0.25], [0.25, 0.75]]
mu2 = [5, 5]
si2 = [[0.25, 0.75], [0.75, 0.25]]
samples = 100
X1 = np.random.multivariate_normal(
np.asarray(mu1), np.asarray(si1), samples)
X2 = np.random.multivariate_normal(
np.asarray(mu2), np.asarray(si2), samples)
X = np.vstack((X1, X2))
y = np.asarray([0] * samples + [1] * samples)
z = zip(X, y)
random.shuffle(z)
X, y = zip(*z)
X, y = np.asarray(X), np.asarray(y)
algorithms = [ENN(), RENN(), AllKNN(), TomekLinks(), CNN(), SSMA(max_loop=500)]
titles = ['ENN','RENN', 'AllKNN', 'TomekLinks', 'CNN', 'SSMA']
index = 0
f, subfig = plt.subplots(4,2)
for i in range(4):
for j in range(2):
if i == 0 and j == 0:
subfig[i][j].plot(X[y==0].T[0], X[y==0].T[1], 'bs', X[y==1].T[0], X[y==1].T[1],'ro')
subfig[i][j].axis([0, 10, 0, 10])
subfig[i][j].set_title('Original Dataset')
elif index < len(algorithms):
X_, y_ = algorithms[index].reduce_data(X, y)
print algorithms[index], 'reduction: %.2f' % (algorithms[index].reduction_)
subfig[i][j].plot(X_[y_==0].T[0], X_[y_==0].T[1], 'bs', X_[y_==1].T[0], X_[y_==1].T[1],'ro')
subfig[i][j].axis([0, 10, 0, 10])
subfig[i][j].set_title(titles[index])
index = index + 1
plt.show()
| bsd-2-clause | 1,370,264,468,952,154,000 | 28.267857 | 104 | 0.585723 | false |
bing-ads-sdk/BingAds-Python-SDK | bingads/v12/internal/bulk/object_writer.py | 1 | 1956 | from bingads.manifest import BULK_FORMAT_VERSION_6
from bingads.v12.internal.bulk.bulk_object_factory import _BulkObjectFactory
from bingads.v12.internal.bulk.csv_writer import _CsvWriter
from bingads.v12.internal.bulk.csv_headers import _CsvHeaders
from bingads.v12.internal.bulk.row_values import _RowValues
from bingads.v12.internal.bulk.string_table import _StringTable
class _BulkObjectWriter():
def __init__(self, file_path, file_type):
self._file_path = file_path
if file_type == 'Csv':
self._delimiter = ','
elif file_type == 'Tsv':
self._delimiter = '\t'
else:
raise ValueError('Invalid file_type provided: {0}'.format(file_type))
self._csv_writer = _CsvWriter(self.file_path, delimiter=self._delimiter)
self._csv_writer.__enter__()
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self._csv_writer.__exit__(exc_type, exc_val, exc_tb)
def close(self):
self.__exit__(None, None, None)
def write_file_metadata(self):
self.write_headers()
self.write_format_version()
def write_headers(self):
self._csv_writer.writerow(_CsvHeaders.HEADERS)
def write_format_version(self):
version_row = _RowValues()
version_row[_StringTable.Type] = _StringTable.SemanticVersion
version_row[_StringTable.Name] = BULK_FORMAT_VERSION_6
self._csv_writer.writerow(version_row.columns)
def write_object_row(self, bulk_object, exclude_readonly_data=False):
values = _RowValues()
bulk_object.write_to_row_values(values, exclude_readonly_data)
values[_StringTable.Type] = _BulkObjectFactory.get_bulk_row_type(bulk_object)
self._csv_writer.writerow(values.columns)
@property
def file_path(self):
return self._file_path
@property
def delimiter(self):
return self._delimiter
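# Illustrative usage sketch (the path and bulk entity are hypothetical):
#
#   with _BulkObjectWriter('campaigns.csv', 'Csv') as writer:
#       writer.write_file_metadata()             # header row + format version row
#       writer.write_object_row(bulk_campaign)   # one row per bulk object
#
# Passing 'Tsv' instead of 'Csv' switches the delimiter from ',' to a tab.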
| mit | -6,633,296,943,759,418,000 | 32.724138 | 85 | 0.65593 | false |
soravux/scoop | bench/benchmark.py | 5 | 4302 | import sys
import scoop
import time
import argparse
import logging
import random
from serialization import find_pickling_speed
from functools import partial
def make_parser():
parser = argparse.ArgumentParser(
description=('Run a parametric benchmark of scoop.')
)
parser.add_argument('--time', type = float, default = 5.0,
help = "The mean time of each individual task")
parser.add_argument('--serialization-time', type = float, default = 0.01,
help = "The mean serialization time for each task")
parser.add_argument('--tries', type = int, default = 10,
help = ("The number of functions sent to the workers "
"for each level of the hierchy"))
parser.add_argument('--log', help = ("A filename to log the output "
"(optional). This is different than the"
'scoop "--log" option'))
    parser.add_argument('--level', help = "Number of levels in the hierarchy",
type = int, default = 2)
return parser
def print_header(args):
header = ("-------------------------------------------------\n"
"Benchmarking using these parameters:\n"
"tries: {0.tries}^{0.level} = {1}\n"
"time: {0.time} s\n"
"serialization time: {0.serialization_time} s\n"
"SCOOP Parameters:\n"
"number of workers: {2} workers\n"
"number of brokers: {3} brokers\n"
"SCOOP version: {4}\n"
"Python version {5}\n"
"-------------------------------------------------\n")
header = header.format(args, args.tries ** args.level,
scoop.SIZE, 1, scoop.__version__ + scoop.__revision__,
sys.version)
if args.log:
with open(args.log, 'a') as f:
f.write(header)
else:
print(header)
def test_function(_fake_data, cpu_time = 3.0, level = 0, number_of_tests = 1):
start_time = time.time()
test_partial = partial(test_function, number_of_tests = number_of_tests,
level = level - 1, cpu_time = cpu_time)
test_partial.__name__ = "test_partial"
total = 0
number_of_times = 0
while time.time() - start_time < cpu_time:
total += random.random()
number_of_times += 1
if level <= 1:
if number_of_times != 0:
return total / number_of_times
else:
return 0.5
else:
test_data = (_fake_data for _ in range(number_of_tests))
children = scoop.futures.map(test_partial, test_data)
return sum(children) / number_of_tests
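# Illustrative note: with --tries 10 and --level 2, the root map() in test()
# submits 10 futures, each of which maps 10 more (recursion stops at level <= 1),
# giving tries**level = 100 leaf tasks, the figure printed in the header.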
def test(number_of_tests, cpu_time, serialization_time, log, levels):
test_partial = partial(test_function, number_of_tests = number_of_tests,
level = levels, cpu_time = cpu_time)
test_partial.__name__ = "test_partial"
fake_data_len = find_serialization_time(serialization_time, log)
fake_data = [random.random() for _ in range(fake_data_len)]
send_data = (fake_data for _ in range(number_of_tests))
begin_time = time.time()
result = list(scoop.futures.map(test_partial, send_data))
total_time = time.time() - begin_time
return result, total_time
def find_serialization_time(wanted_time, log):
speed = find_pickling_speed(500, 14)
if log:
with open(log, 'a') as f:
f.write("The pickling speed is {:g} bytes/s.\n".format(1/speed))
else:
print("The pickling speed is {:g} bytes/s.".format(1/speed))
return int(wanted_time / speed)
if __name__ == "__main__":
args = make_parser().parse_args()
print_header(args)
start_time = time.time()
result, test_time = test(args.tries, args.time,
args.serialization_time, args.log,
args.level)
end_time = time.time()
if args.log:
with open(args.log, 'a') as f:
f.write("Total time: {}\n".format(end_time - start_time))
f.write("Test time: {}\n".format(test_time))
else:
print("Total time: {}".format(end_time - start_time))
print("Test time: {}".format(test_time))
| lgpl-3.0 | -7,849,782,711,477,384,000 | 34.553719 | 80 | 0.548117 | false |
tacaswell/scikit-beam | skbeam/io/save_powder_output.py | 5 | 10661 | # ######################################################################
# Copyright (c) 2014, Brookhaven Science Associates, Brookhaven #
# National Laboratory. All rights reserved. #
# #
# Redistribution and use in source and binary forms, with or without #
# modification, are permitted provided that the following conditions #
# are met: #
# #
# * Redistributions of source code must retain the above copyright #
# notice, this list of conditions and the following disclaimer. #
# #
# * Redistributions in binary form must reproduce the above copyright #
# notice this list of conditions and the following disclaimer in #
# the documentation and/or other materials provided with the #
# distribution. #
# #
# * Neither the name of the Brookhaven Science Associates, Brookhaven #
# National Laboratory nor the names of its contributors may be used #
# to endorse or promote products derived from this software without #
# specific prior written permission. #
# #
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS #
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT #
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS #
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE #
# COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, #
# INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES #
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR #
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) #
# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, #
# STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OTHERWISE) ARISING #
# IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE #
# POSSIBILITY OF SUCH DAMAGE. #
########################################################################
"""
This module is for saving integrated powder x-ray diffraction
intensities into different file formats.
(Output into different file formats, .chi, .dat and .xye)
"""
from __future__ import absolute_import, division, print_function
import logging
import os
import numpy as np
logger = logging.getLogger(__name__)
def save_output(tth, intensity, output_name, q_or_2theta, ext='.chi',
err=None, dir_path=None):
"""
Save output diffraction intensities into .chi, .dat or .xye file formats.
If the extension(ext) of the output file is not selected it will be
saved as a .chi file
Parameters
----------
tth : ndarray
twotheta values (degrees) or Q values (Angstroms)
shape (N, ) array
intensity : ndarray
intensity values (N, ) array
output_name : str
name for the saved output diffraction intensities
q_or_2theta : {'Q', '2theta'}
twotheta (degrees) or Q (Angstroms) values
ext : {'.chi', '.dat', '.xye'}, optional
save output diffraction intensities into .chi, .dat or
.xye file formats. (If the extension of output file is not
selected it will be saved as a .chi file)
err : ndarray, optional
error value of intensity shape(N, ) array
dir_path : str, optional
new directory path to save the output data files
eg: /Volumes/Data/experiments/data/
"""
if q_or_2theta not in set(['Q', '2theta']):
raise ValueError("It is expected to provide whether the data is"
" Q values(enter Q) or two theta values"
" (enter 2theta)")
if q_or_2theta == "Q":
des = ("""First column represents Q values (Angstroms) and second
column represents intensities and if there is a third
column it represents the error values of intensities.""")
else:
des = ("""First column represents two theta values (degrees) and
second column represents intensities and if there is
a third column it represents the error values of intensities.""")
_validate_input(tth, intensity, err, ext)
file_path = _create_file_path(dir_path, output_name, ext)
with open(file_path, 'wb') as f:
_HEADER = """{out_name}
This file contains integrated powder x-ray diffraction
intensities.
{des}
Number of data points in the file : {n_pts}
######################################################"""
_encoding_writer(f, _HEADER.format(n_pts=len(tth),
out_name=output_name,
des=des))
new_line = "\n"
_encoding_writer(f, new_line)
if (err is None):
np.savetxt(f, np.c_[tth, intensity])
else:
np.savetxt(f, np.c_[tth, intensity, err])
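# Illustrative usage sketch (q, intensity and err are hypothetical equal-length arrays):
#
#   save_output(q, intensity, 'Ni_powder', 'Q', ext='.chi')
#   save_output(q, intensity, 'Ni_powder', 'Q', ext='.xye', err=err)
#
# The '.xye' format requires err; omitting it raises a ValueError in
# _validate_input below.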
def _encoding_writer(f, _HEADER):
"""
Encode the writer for python 3
Parameters
----------
f : str
file name
_HEADER : str
string need to be written in the file
"""
f.write(_HEADER.encode('utf-8'))
def gsas_writer(tth, intensity, output_name, mode=None,
err=None, dir_path=None):
"""
Save diffraction intensities into .gsas file format
Parameters
----------
tth : ndarray
twotheta values (degrees) shape (N, ) array
intensity : ndarray
intensity values shape (N, ) array
output_name : str
name for the saved output diffraction intensities
mode : {'STD', 'ESD', 'FXYE'}, optional
GSAS file formats, could be 'STD', 'ESD', 'FXYE'
err : ndarray, optional
error value of intensity shape(N, ) array
err is None then mode will be 'STD'
dir_path : str, optional
new directory path to save the output data files
eg: /Data/experiments/data/
"""
# save output diffraction intensities into .gsas file extension.
ext = '.gsas'
_validate_input(tth, intensity, err, ext)
file_path = _create_file_path(dir_path, output_name, ext)
max_intensity = 999999
log_scale = np.floor(np.log10(max_intensity / np.max(intensity)))
log_scale = min(log_scale, 0)
scale = 10 ** int(log_scale)
lines = []
title = 'Angular Profile'
title += ': %s' % output_name
title += ' scale=%g' % scale
title = title[:80]
lines.append("%-80s" % title)
i_bank = 1
n_chan = len(intensity)
# two-theta0 and dtwo-theta in centidegrees
tth0_cdg = tth[0] * 100
dtth_cdg = (tth[-1] - tth[0]) / (len(tth) - 1) * 100
if err is None:
mode = 'STD'
if mode == 'STD':
n_rec = int(np.ceil(n_chan / 10.0))
l_bank = ("BANK %5i %8i %8i CONST %9.5f %9.5f %9.5f %9.5f STD" %
(i_bank, n_chan, n_rec, tth0_cdg, dtth_cdg, 0, 0))
lines.append("%-80s" % l_bank)
lrecs = ["%2i%6.0f" % (1, ii * scale) for ii in intensity]
for i in range(0, len(lrecs), 10):
lines.append("".join(lrecs[i:i + 10]))
elif mode == 'ESD':
n_rec = int(np.ceil(n_chan / 5.0))
l_bank = ("BANK %5i %8i %8i CONST %9.5f %9.5f %9.5f %9.5f ESD"
% (i_bank, n_chan, n_rec, tth0_cdg, dtth_cdg, 0, 0))
lines.append("%-80s" % l_bank)
l_recs = ["%8.0f%8.0f" % (ii, ee * scale)
for ii, ee in zip(intensity, err)]
for i in range(0, len(l_recs), 5):
lines.append("".join(l_recs[i:i + 5]))
elif mode == 'FXYE':
n_rec = n_chan
l_bank = ("BANK %5i %8i %8i CONST %9.5f %9.5f %9.5f %9.5f FXYE" %
(i_bank, n_chan, n_rec, tth0_cdg, dtth_cdg, 0, 0))
lines.append("%-80s" % l_bank)
l_recs = [
"%22.10f%22.10f%24.10f" % (xx * scale, yy * scale, ee * scale)
for xx, yy, ee in zip(tth, intensity, err)]
for i in range(len(l_recs)):
lines.append("%-80s" % l_recs[i])
else:
raise ValueError(" Define the GSAS file type ")
lines[-1] = "%-80s" % lines[-1]
rv = "\r\n".join(lines) + "\r\n"
with open(file_path, 'wt') as f:
f.write(rv)
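# Illustrative usage sketch (tth, intensity and err are hypothetical equal-length arrays):
#
#   gsas_writer(tth, intensity, 'Ni_powder', mode='FXYE', err=err)
#   gsas_writer(tth, intensity, 'Ni_powder')   # err=None falls back to 'STD' mode
#
# 'ESD' and 'FXYE' both need err; an unrecognized mode raises a ValueError.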
def _validate_input(tth, intensity, err, ext):
"""
This function validate all the inputs
Parameters
----------
tth : ndarray
twotheta values (degrees) or Q space values (Angstroms)
intensity : ndarray
intensity values
err : ndarray, optional
error value of intensity
ext : {'.chi', '.dat', '.xye'}
save output diffraction intensities into .chi,
.dat or .xye file formats.
"""
if len(tth) != len(intensity):
raise ValueError("Number of intensities and the number of Q or"
" two theta values are different ")
if err is not None:
if len(intensity) != len(err):
raise ValueError("Number of intensities and the number of"
" err values are different")
if ext == '.xye' and err is None:
raise ValueError("Provide the Error value of intensity"
" (for .xye file format err != None)")
def _create_file_path(dir_path, output_name, ext):
"""
This function create a output file path to save
diffraction intensities.
Parameters
----------
dir_path : str
new directory path to save the output data files
eg: /Data/experiments/data/
output_name : str
name for the saved output diffraction intensities
ext : {'.chi', '.dat', '.xye'}
save output diffraction intensities into .chi,
.dat or .xye file formats.
Returns:
-------
file_path : str
path to save the diffraction intensities
"""
if (dir_path) is None:
file_path = output_name + ext
elif os.path.exists(dir_path):
file_path = os.path.join(dir_path, output_name) + ext
else:
raise ValueError('The given path does not exist.')
if os.path.isfile(file_path):
logger.info("Output file of diffraction intensities"
" already exists")
os.remove(file_path)
return file_path
| bsd-3-clause | 4,860,976,921,087,705,000 | 35.138983 | 77 | 0.553513 | false |
bitemyapp/ganeti | lib/workerpool.py | 6 | 18604 | #
#
# Copyright (C) 2008, 2009, 2010 Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
# TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Base classes for worker pools.
"""
import logging
import threading
import heapq
import itertools
from ganeti import compat
from ganeti import errors
_TERMINATE = object()
_DEFAULT_PRIORITY = 0
class DeferTask(Exception):
"""Special exception class to defer a task.
This class can be raised by L{BaseWorker.RunTask} to defer the execution of a
task. Optionally, the priority of the task can be changed.
"""
def __init__(self, priority=None):
"""Initializes this class.
@type priority: number
@param priority: New task priority (None means no change)
"""
Exception.__init__(self)
self.priority = priority
class NoSuchTask(Exception):
"""Exception raised when a task can't be found.
"""
class BaseWorker(threading.Thread, object):
"""Base worker class for worker pools.
Users of a worker pool must override RunTask in a subclass.
"""
# pylint: disable=W0212
def __init__(self, pool, worker_id):
"""Constructor for BaseWorker thread.
@param pool: the parent worker pool
@param worker_id: identifier for this worker
"""
super(BaseWorker, self).__init__(name=worker_id)
self.pool = pool
self._worker_id = worker_id
self._current_task = None
assert self.getName() == worker_id
def ShouldTerminate(self):
"""Returns whether this worker should terminate.
Should only be called from within L{RunTask}.
"""
self.pool._lock.acquire()
try:
assert self._HasRunningTaskUnlocked()
return self.pool._ShouldWorkerTerminateUnlocked(self)
finally:
self.pool._lock.release()
def GetCurrentPriority(self):
"""Returns the priority of the current task.
Should only be called from within L{RunTask}.
"""
self.pool._lock.acquire()
try:
assert self._HasRunningTaskUnlocked()
(priority, _, _, _) = self._current_task
return priority
finally:
self.pool._lock.release()
def SetTaskName(self, taskname):
"""Sets the name of the current task.
Should only be called from within L{RunTask}.
@type taskname: string
@param taskname: Task's name
"""
if taskname:
name = "%s/%s" % (self._worker_id, taskname)
else:
name = self._worker_id
# Set thread name
self.setName(name)
def _HasRunningTaskUnlocked(self):
"""Returns whether this worker is currently running a task.
"""
return (self._current_task is not None)
def _GetCurrentOrderAndTaskId(self):
"""Returns the order and task ID of the current task.
Should only be called from within L{RunTask}.
"""
self.pool._lock.acquire()
try:
assert self._HasRunningTaskUnlocked()
(_, order_id, task_id, _) = self._current_task
return (order_id, task_id)
finally:
self.pool._lock.release()
def run(self):
"""Main thread function.
Waits for new tasks to show up in the queue.
"""
pool = self.pool
while True:
assert self._current_task is None
defer = None
try:
# Wait on lock to be told either to terminate or to do a task
pool._lock.acquire()
try:
task = pool._WaitForTaskUnlocked(self)
if task is _TERMINATE:
# Told to terminate
break
if task is None:
# Spurious notification, ignore
continue
self._current_task = task
# No longer needed, dispose of reference
del task
assert self._HasRunningTaskUnlocked()
finally:
pool._lock.release()
(priority, _, _, args) = self._current_task
try:
# Run the actual task
assert defer is None
logging.debug("Starting task %r, priority %s", args, priority)
assert self.getName() == self._worker_id
try:
self.RunTask(*args) # pylint: disable=W0142
finally:
self.SetTaskName(None)
logging.debug("Done with task %r, priority %s", args, priority)
except DeferTask, err:
defer = err
if defer.priority is None:
# Use same priority
defer.priority = priority
logging.debug("Deferring task %r, new priority %s",
args, defer.priority)
assert self._HasRunningTaskUnlocked()
except: # pylint: disable=W0702
logging.exception("Caught unhandled exception")
assert self._HasRunningTaskUnlocked()
finally:
# Notify pool
pool._lock.acquire()
try:
if defer:
assert self._current_task
# Schedule again for later run
(_, _, task_id, args) = self._current_task
pool._AddTaskUnlocked(args, defer.priority, task_id)
if self._current_task:
self._current_task = None
pool._worker_to_pool.notifyAll()
finally:
pool._lock.release()
assert not self._HasRunningTaskUnlocked()
logging.debug("Terminates")
def RunTask(self, *args):
"""Function called to start a task.
This needs to be implemented by child classes.
"""
raise NotImplementedError()
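class _ExampleWorker(BaseWorker):
  """A minimal illustrative worker, not used anywhere in this module.
  The class name and its single 'payload' argument are hypothetical; the class
  only sketches the RunTask contract described above.
  """
  def RunTask(self, payload):
    if self.ShouldTerminate():
      return
    # A real worker could raise DeferTask(priority=...) here to hand the task
    # back to the pool for a later retry.
    logging.debug("Example worker handled %r at priority %s",
                  payload, self.GetCurrentPriority())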
class WorkerPool(object):
"""Worker pool with a queue.
This class is thread-safe.
Tasks are guaranteed to be started in the order in which they're
added to the pool. Due to the nature of threading, they're not
guaranteed to finish in the same order.
@type _tasks: list of tuples
@ivar _tasks: Each tuple has the format (priority, order ID, task ID,
arguments). Priority and order ID are numeric and essentially control the
sort order. The order ID is an increasing number denoting the order in
which tasks are added to the queue. The task ID is controlled by user of
workerpool, see L{AddTask} for details. The task arguments are C{None} for
abandoned tasks, otherwise a sequence of arguments to be passed to
L{BaseWorker.RunTask}). The list must fulfill the heap property (for use by
the C{heapq} module).
@type _taskdata: dict; (task IDs as keys, tuples as values)
@ivar _taskdata: Mapping from task IDs to entries in L{_tasks}
"""
def __init__(self, name, num_workers, worker_class):
"""Constructor for worker pool.
@param num_workers: number of workers to be started
(dynamic resizing is not yet implemented)
@param worker_class: the class to be instantiated for workers;
should derive from L{BaseWorker}
"""
# Some of these variables are accessed by BaseWorker
self._lock = threading.Lock()
self._pool_to_pool = threading.Condition(self._lock)
self._pool_to_worker = threading.Condition(self._lock)
self._worker_to_pool = threading.Condition(self._lock)
self._worker_class = worker_class
self._name = name
self._last_worker_id = 0
self._workers = []
self._quiescing = False
# Terminating workers
self._termworkers = []
# Queued tasks
self._counter = itertools.count()
self._tasks = []
self._taskdata = {}
# Start workers
self.Resize(num_workers)
# TODO: Implement dynamic resizing?
def _WaitWhileQuiescingUnlocked(self):
"""Wait until the worker pool has finished quiescing.
"""
while self._quiescing:
self._pool_to_pool.wait()
def _AddTaskUnlocked(self, args, priority, task_id):
"""Adds a task to the internal queue.
@type args: sequence
@param args: Arguments passed to L{BaseWorker.RunTask}
@type priority: number
@param priority: Task priority
@param task_id: Task ID
"""
assert isinstance(args, (tuple, list)), "Arguments must be a sequence"
assert isinstance(priority, (int, long)), "Priority must be numeric"
assert task_id is None or isinstance(task_id, (int, long)), \
"Task ID must be numeric or None"
task = [priority, self._counter.next(), task_id, args]
if task_id is not None:
assert task_id not in self._taskdata
# Keep a reference to change priority later if necessary
self._taskdata[task_id] = task
# A counter is used to ensure elements are processed in their incoming
# order. For processing they're sorted by priority and then counter.
heapq.heappush(self._tasks, task)
# Notify a waiting worker
self._pool_to_worker.notify()
def AddTask(self, args, priority=_DEFAULT_PRIORITY, task_id=None):
"""Adds a task to the queue.
@type args: sequence
@param args: arguments passed to L{BaseWorker.RunTask}
@type priority: number
@param priority: Task priority
@param task_id: Task ID
@note: The task ID can be essentially anything that can be used as a
dictionary key. Callers, however, must ensure a task ID is unique while a
task is in the pool or while it might return to the pool due to deferring
using L{DeferTask}.
"""
self._lock.acquire()
try:
self._WaitWhileQuiescingUnlocked()
self._AddTaskUnlocked(args, priority, task_id)
finally:
self._lock.release()
def AddManyTasks(self, tasks, priority=_DEFAULT_PRIORITY, task_id=None):
"""Add a list of tasks to the queue.
@type tasks: list of tuples
@param tasks: list of args passed to L{BaseWorker.RunTask}
@type priority: number or list of numbers
@param priority: Priority for all added tasks or a list with the priority
for each task
@type task_id: list
@param task_id: List with the ID for each task
@note: See L{AddTask} for a note on task IDs.
"""
assert compat.all(isinstance(task, (tuple, list)) for task in tasks), \
"Each task must be a sequence"
assert (isinstance(priority, (int, long)) or
compat.all(isinstance(prio, (int, long)) for prio in priority)), \
"Priority must be numeric or be a list of numeric values"
assert task_id is None or isinstance(task_id, (tuple, list)), \
"Task IDs must be in a sequence"
if isinstance(priority, (int, long)):
priority = [priority] * len(tasks)
elif len(priority) != len(tasks):
raise errors.ProgrammerError("Number of priorities (%s) doesn't match"
" number of tasks (%s)" %
(len(priority), len(tasks)))
if task_id is None:
task_id = [None] * len(tasks)
elif len(task_id) != len(tasks):
raise errors.ProgrammerError("Number of task IDs (%s) doesn't match"
" number of tasks (%s)" %
(len(task_id), len(tasks)))
self._lock.acquire()
try:
self._WaitWhileQuiescingUnlocked()
assert compat.all(isinstance(prio, (int, long)) for prio in priority)
assert len(tasks) == len(priority)
assert len(tasks) == len(task_id)
for (args, prio, tid) in zip(tasks, priority, task_id):
self._AddTaskUnlocked(args, prio, tid)
finally:
self._lock.release()
def ChangeTaskPriority(self, task_id, priority):
"""Changes a task's priority.
@param task_id: Task ID
@type priority: number
@param priority: New task priority
@raise NoSuchTask: When the task referred by C{task_id} can not be found
(it may never have existed, may have already been processed, or is
currently running)
"""
assert isinstance(priority, (int, long)), "Priority must be numeric"
self._lock.acquire()
try:
logging.debug("About to change priority of task %s to %s",
task_id, priority)
# Find old task
oldtask = self._taskdata.get(task_id, None)
if oldtask is None:
msg = "Task '%s' was not found" % task_id
logging.debug(msg)
raise NoSuchTask(msg)
# Prepare new task
newtask = [priority] + oldtask[1:]
# Mark old entry as abandoned (this doesn't change the sort order and
# therefore doesn't invalidate the heap property of L{self._tasks}).
# See also <http://docs.python.org/library/heapq.html#priority-queue-
# implementation-notes>.
oldtask[-1] = None
# Change reference to new task entry and forget the old one
assert task_id is not None
self._taskdata[task_id] = newtask
# Add a new task with the old number and arguments
heapq.heappush(self._tasks, newtask)
# Notify a waiting worker
self._pool_to_worker.notify()
finally:
self._lock.release()
def _WaitForTaskUnlocked(self, worker):
"""Waits for a task for a worker.
@type worker: L{BaseWorker}
@param worker: Worker thread
"""
while True:
if self._ShouldWorkerTerminateUnlocked(worker):
return _TERMINATE
# If there's a pending task, return it immediately
if self._tasks:
# Get task from queue and tell pool about it
try:
task = heapq.heappop(self._tasks)
finally:
self._worker_to_pool.notifyAll()
(_, _, task_id, args) = task
# If the priority was changed, "args" is None
if args is None:
# Try again
logging.debug("Found abandoned task (%r)", task)
continue
# Delete reference
if task_id is not None:
del self._taskdata[task_id]
return task
logging.debug("Waiting for tasks")
# wait() releases the lock and sleeps until notified
self._pool_to_worker.wait()
logging.debug("Notified while waiting")
def _ShouldWorkerTerminateUnlocked(self, worker):
"""Returns whether a worker should terminate.
"""
return (worker in self._termworkers)
def _HasRunningTasksUnlocked(self):
"""Checks whether there's a task running in a worker.
"""
for worker in self._workers + self._termworkers:
if worker._HasRunningTaskUnlocked(): # pylint: disable=W0212
return True
return False
def HasRunningTasks(self):
"""Checks whether there's at least one task running.
"""
self._lock.acquire()
try:
return self._HasRunningTasksUnlocked()
finally:
self._lock.release()
def Quiesce(self):
"""Waits until the task queue is empty.
"""
self._lock.acquire()
try:
self._quiescing = True
# Wait while there are tasks pending or running
while self._tasks or self._HasRunningTasksUnlocked():
self._worker_to_pool.wait()
finally:
self._quiescing = False
# Make sure AddTasks continues in case it was waiting
self._pool_to_pool.notifyAll()
self._lock.release()
def _NewWorkerIdUnlocked(self):
"""Return an identifier for a new worker.
"""
self._last_worker_id += 1
return "%s%d" % (self._name, self._last_worker_id)
def _ResizeUnlocked(self, num_workers):
"""Changes the number of workers.
"""
assert num_workers >= 0, "num_workers must be >= 0"
logging.debug("Resizing to %s workers", num_workers)
current_count = len(self._workers)
if current_count == num_workers:
# Nothing to do
pass
elif current_count > num_workers:
if num_workers == 0:
# Create copy of list to iterate over while lock isn't held.
termworkers = self._workers[:]
del self._workers[:]
else:
# TODO: Implement partial downsizing
raise NotImplementedError()
#termworkers = ...
self._termworkers += termworkers
# Notify workers that something has changed
self._pool_to_worker.notifyAll()
# Join all terminating workers
self._lock.release()
try:
for worker in termworkers:
logging.debug("Waiting for thread %s", worker.getName())
worker.join()
finally:
self._lock.acquire()
# Remove terminated threads. This could be done in a more efficient way
# (del self._termworkers[:]), but checking worker.isAlive() makes sure we
# don't leave zombie threads around.
for worker in termworkers:
assert worker in self._termworkers, ("Worker not in list of"
" terminating workers")
if not worker.isAlive():
self._termworkers.remove(worker)
assert not self._termworkers, "Zombie worker detected"
elif current_count < num_workers:
# Create (num_workers - current_count) new workers
for _ in range(num_workers - current_count):
worker = self._worker_class(self, self._NewWorkerIdUnlocked())
self._workers.append(worker)
worker.start()
def Resize(self, num_workers):
"""Changes the number of workers in the pool.
@param num_workers: the new number of workers
"""
self._lock.acquire()
try:
return self._ResizeUnlocked(num_workers)
finally:
self._lock.release()
def TerminateWorkers(self):
"""Terminate all worker threads.
Unstarted tasks will be ignored.
"""
logging.debug("Terminating all workers")
self._lock.acquire()
try:
self._ResizeUnlocked(0)
if self._tasks:
logging.debug("There are %s tasks left", len(self._tasks))
finally:
self._lock.release()
logging.debug("All workers terminated")
| bsd-2-clause | -8,704,374,748,152,898,000 | 28.251572 | 79 | 0.639647 | false |
timlib/webXray | webxray/Client.py | 1 | 4965 | # standard python
import base64
import bz2
import datetime
import json
import multiprocessing
import optparse
import os
import re
import socket
import sys
import time
import urllib.parse
import urllib.request
from webxray.ChromeDriver import ChromeDriver
class Client:
def __init__(self, server_url, pool_size=None):
"""
Init allows us to set a custom pool_size, otherwise
we base on CPU count.
"""
self.server_url = server_url
if pool_size:
self.pool_size = pool_size
else:
self.pool_size = multiprocessing.cpu_count()
# __init__
def get_and_process_client_tasks(self,proc_num):
"""
		This is the main loop that should run indefinitely. Purpose is to
send server "ready" message to get tasks which are either wait,
get_scan, or get_policy. If unable to get commands it will
wait and try again in 5 seconds. If command is get_scan or
get_policy, the appropriate action will be taken and results
will be sent as POST data back to server.
"""
local_test = False
debug = True
if local_test:
client_id = 'local_client'
wbxr_server_url = 'http://127.0.0.1:5000/'
else:
client_id = socket.gethostname()
wbxr_server_url = self.server_url
if debug: print(f'{client_id} [{proc_num}]\t😀 starting')
# main loop
while True:
# set up request
request = urllib.request.Request(
wbxr_server_url,
headers = {
'User-Agent' : 'wbxr_client_v0_0',
}
)
data = urllib.parse.urlencode({'ready':True,'client_id':client_id})
data = data.encode('utf8')
# attempt to get commands
if debug: print(f'[{proc_num}]\t📥 fetching commands')
try:
command_params = json.loads(urllib.request.urlopen(request,data,timeout=60).read().strip().decode('utf-8'))
except:
print(f'[{proc_num}]\t👎 Unable to contact server, will wait and try again.')
time.sleep(5)
continue
# process commands
task = command_params['task']
print('[%s]\t👉 TASK IS: %s' % (proc_num, task))
if task == 'wait':
time.sleep(10)
continue # restarts main loop
elif task == 'get_scan' or task == 'get_policy' or task == 'get_crawl' or task == 'get_random_crawl':
target = command_params['target']
client_config = command_params['client_config']
else:
print(f'[{proc_num}]\t🥴 CANNOT READ COMMAND SET, EXITING')
return
if debug: print('[%s]\t🚗 setting up driver' % proc_num)
if client_config['client_browser_type'] == 'chrome':
browser_driver = ChromeDriver(client_config, port_offset=proc_num)
else:
print('[%s]\t🥴 INVALID BROWSER TYPE, HARD EXIT!' % proc_num)
exit()
print(f'[{proc_num}]\t🏃♂️ GOING TO {task} on {str(target)[:30]}...')
if task == 'get_scan':
task_result = browser_driver.get_scan(target)
elif task == 'get_crawl':
task_result = browser_driver.get_crawl(target)
elif task == 'get_policy':
task_result = browser_driver.get_scan(target, get_text_only=True)
elif task == 'get_random_crawl':
task_result = browser_driver.get_random_crawl(target)
# unpack result
success = task_result['success']
task_result = task_result['result']
# if scan was successful we will have a big chunk of data
# so we compress it to speed up network xfer and reduce disk
# utilization while it is in the result queue
if success:
if debug: print(f'[{proc_num}]\t🗜️ compressing output for {str(target)[:30]}...')
task_result = base64.urlsafe_b64encode(bz2.compress(bytes(json.dumps(task_result),'utf-8')))
# build request to post results to server
if debug: print(f'[{proc_num}]\t📤 returning output')
data = urllib.parse.urlencode({
'client_id' : client_id,
'success' : json.dumps(success),
'target' : json.dumps(target),
'task' : task,
'task_result' : task_result
})
data = data.encode('utf-8')
# send the request
request = urllib.request.Request(
wbxr_server_url,
headers = {
'User-Agent' : 'wbxr_client_v0_0',
}
)
# adding charset parameter to the Content-Type header.
request.add_header("Content-Type","application/x-www-form-urlencoded;charset=utf-8")
# note we can lose this result
try:
print(f'[{proc_num}]\t📥 RESPONSE: %s' % (urllib.request.urlopen(request,data,timeout=600).read().decode('utf-8')))
continue
except:
print(f'[{proc_num}]\t😖 Unable to post results!!!')
time.sleep(5)
return
# get_and_process_client_tasks
def run_client(self):
if sys.platform == 'darwin' and multiprocessing.get_start_method(allow_none=True) != 'forkserver':
multiprocessing.set_start_method('forkserver')
# processes all need a number, this also gets
# used as a port offset
proc_nums = []
for i in range(0,self.pool_size):
proc_nums.append(i)
# start workers
myPool = multiprocessing.Pool(self.pool_size)
myPool.map(self.get_and_process_client_tasks, proc_nums)
# run_client
# Client | gpl-3.0 | -6,543,108,149,990,522,000 | 27.616279 | 118 | 0.661248 | false |
apporc/cinder | cinder/volume/drivers/netapp/dataontap/fc_cmode.py | 3 | 3616 | # Copyright (c) - 2014, Clinton Knight. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Volume driver for NetApp Data ONTAP (C-mode) FibreChannel storage systems.
"""
from oslo_log import log as logging
from cinder.volume import driver
from cinder.volume.drivers.netapp.dataontap import block_cmode
from cinder.zonemanager import utils as fczm_utils
LOG = logging.getLogger(__name__)
class NetAppCmodeFibreChannelDriver(driver.BaseVD,
driver.ManageableVD,
driver.ExtendVD,
driver.TransferVD,
driver.SnapshotVD):
"""NetApp C-mode FibreChannel volume driver."""
DRIVER_NAME = 'NetApp_FibreChannel_Cluster_direct'
def __init__(self, *args, **kwargs):
super(NetAppCmodeFibreChannelDriver, self).__init__(*args, **kwargs)
self.library = block_cmode.NetAppBlockStorageCmodeLibrary(
self.DRIVER_NAME, 'FC', **kwargs)
def do_setup(self, context):
self.library.do_setup(context)
def check_for_setup_error(self):
self.library.check_for_setup_error()
def create_volume(self, volume):
self.library.create_volume(volume)
def create_volume_from_snapshot(self, volume, snapshot):
self.library.create_volume_from_snapshot(volume, snapshot)
def create_cloned_volume(self, volume, src_vref):
self.library.create_cloned_volume(volume, src_vref)
def delete_volume(self, volume):
self.library.delete_volume(volume)
def create_snapshot(self, snapshot):
self.library.create_snapshot(snapshot)
def delete_snapshot(self, snapshot):
self.library.delete_snapshot(snapshot)
def get_volume_stats(self, refresh=False):
return self.library.get_volume_stats(refresh)
def extend_volume(self, volume, new_size):
self.library.extend_volume(volume, new_size)
def ensure_export(self, context, volume):
return self.library.ensure_export(context, volume)
def create_export(self, context, volume, connector):
return self.library.create_export(context, volume)
def remove_export(self, context, volume):
self.library.remove_export(context, volume)
def manage_existing(self, volume, existing_ref):
return self.library.manage_existing(volume, existing_ref)
def manage_existing_get_size(self, volume, existing_ref):
return self.library.manage_existing_get_size(volume, existing_ref)
def unmanage(self, volume):
return self.library.unmanage(volume)
@fczm_utils.AddFCZone
def initialize_connection(self, volume, connector):
return self.library.initialize_connection_fc(volume, connector)
@fczm_utils.RemoveFCZone
def terminate_connection(self, volume, connector, **kwargs):
return self.library.terminate_connection_fc(volume, connector,
**kwargs)
def get_pool(self, volume):
return self.library.get_pool(volume)
| apache-2.0 | -3,784,373,204,439,991,000 | 35.16 | 78 | 0.671737 | false |
jdemel/gnuradio | gr-blocks/python/blocks/qa_block_gateway.py | 1 | 9262 | from __future__ import division
#
# Copyright 2011-2013 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# SPDX-License-Identifier: GPL-3.0-or-later
#
#
import numpy
import pmt
from gnuradio import gr, gr_unittest, blocks
class non_sync_block(gr.basic_block):
def __init__(self):
gr.basic_block.__init__(self,
name="non_sync_block",
in_sig=[numpy.float32],
out_sig=[numpy.float32, numpy.float32])
def general_work(self, input_items, output_items):
self.consume(0, len(input_items[0]))
self.produce(0,2)
self.produce(1,1)
return gr.WORK_CALLED_PRODUCE
class add_2_f32_1_f32(gr.sync_block):
def __init__(self):
gr.sync_block.__init__(
self,
name = "add 2 f32",
in_sig = [numpy.float32, numpy.float32],
out_sig = [numpy.float32],
)
def work(self, input_items, output_items):
output_items[0][:] = input_items[0] + input_items[1]
return len(output_items[0])
class add_2_fc32_1_fc32(gr.sync_block):
def __init__(self):
gr.sync_block.__init__(
self,
name = "add 2 fc32",
in_sig = [numpy.complex64, numpy.complex64],
out_sig = [numpy.complex64],
)
def work(self, input_items, output_items):
output_items[0][:] = input_items[0] + input_items[1]
return len(output_items[0])
class convolve(gr.sync_block):
"""
A demonstration using block history to properly perform a convolution.
"""
def __init__(self):
gr.sync_block.__init__(
self,
name = "convolve",
in_sig = [numpy.float32],
out_sig = [numpy.float32]
)
self._taps = [1, 0, 0, 0]
self.set_history(len(self._taps))
def work(self, input_items, output_items):
output_items[0][:] = numpy.convolve(input_items[0], self._taps, mode='valid')
return len(output_items[0])
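# Note on the history mechanism used by convolve above: set_history(len(taps))
# makes the scheduler prepend len(taps) - 1 previously consumed samples to each
# input buffer, so numpy.convolve(..., mode='valid') yields exactly
# len(output_items[0]) samples per work() call.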
class decim2x(gr.decim_block):
def __init__(self):
gr.decim_block.__init__(
self,
name = "decim2x",
in_sig = [numpy.float32],
out_sig = [numpy.float32],
decim = 2
)
def work(self, input_items, output_items):
output_items[0][:] = input_items[0][::2]
return len(output_items[0])
class interp2x(gr.interp_block):
def __init__(self):
gr.interp_block.__init__(
self,
name = "interp2x",
in_sig = [numpy.float32],
out_sig = [numpy.float32],
interp = 2
)
def work(self, input_items, output_items):
output_items[0][1::2] = input_items[0]
output_items[0][::2] = input_items[0]
return len(output_items[0])
class tag_source(gr.sync_block):
def __init__(self):
gr.sync_block.__init__(
self,
name = "tag source",
in_sig = None,
out_sig = [numpy.float32],
)
def work(self, input_items, output_items):
num_output_items = len(output_items[0])
#put code here to fill the output items...
#make a new tag on the middle element every time work is called
count = self.nitems_written(0) + num_output_items // 2
key = pmt.string_to_symbol("example_key")
value = pmt.string_to_symbol("example_value")
self.add_item_tag(0, count, key, value)
return num_output_items
class tag_sink(gr.sync_block):
def __init__(self):
gr.sync_block.__init__(
self,
name = "tag sink",
in_sig = [numpy.float32],
out_sig = None,
)
self.key = None
def work(self, input_items, output_items):
num_input_items = len(input_items[0])
#put code here to process the input items...
#print all the tags received in this work call
nread = self.nitems_read(0)
tags = self.get_tags_in_range(0, nread, nread+num_input_items)
for tag in tags:
#print tag.offset
#print pmt.symbol_to_string(tag.key)
#print pmt.symbol_to_string(tag.value)
self.key = pmt.symbol_to_string(tag.key)
return num_input_items
class tag_sink_win(gr.sync_block):
def __init__(self):
gr.sync_block.__init__(self, name = "tag sink",
in_sig = [numpy.float32],
out_sig = None)
self.key = None
def work(self, input_items, output_items):
num_input_items = len(input_items[0])
tags = self.get_tags_in_window(0, 0, num_input_items)
for tag in tags:
self.key = pmt.symbol_to_string(tag.key)
return num_input_items
class fc32_to_f32_2(gr.sync_block):
def __init__(self):
gr.sync_block.__init__(
self,
name = "fc32_to_f32_2",
in_sig = [numpy.complex64],
out_sig = [(numpy.float32, 2)],
)
def work(self, input_items, output_items):
output_items[0][::,0] = numpy.real(input_items[0])
output_items[0][::,1] = numpy.imag(input_items[0])
return len(output_items[0])
class vector_to_stream(gr.interp_block):
def __init__(self, itemsize, nitems_per_block):
gr.interp_block.__init__(
self,
name = "vector_to_stream",
in_sig = [(itemsize, nitems_per_block)],
out_sig = [itemsize],
interp = nitems_per_block
)
self.block_size = nitems_per_block
def work(self, input_items, output_items):
n = 0
for i in range(len(input_items[0])):
for j in range(self.block_size):
output_items[0][n] = input_items[0][i][j]
n += 1
return len(output_items[0])
class test_block_gateway(gr_unittest.TestCase):
def test_add_f32(self):
tb = gr.top_block()
src0 = blocks.vector_source_f([1, 3, 5, 7, 9], False)
src1 = blocks.vector_source_f([0, 2, 4, 6, 8], False)
adder = add_2_f32_1_f32()
adder.name()
sink = blocks.vector_sink_f()
tb.connect((src0, 0), (adder, 0))
tb.connect((src1, 0), (adder, 1))
tb.connect(adder, sink)
tb.run()
self.assertEqual(sink.data(), [1, 5, 9, 13, 17])
def test_add_fc32(self):
tb = gr.top_block()
src0 = blocks.vector_source_c([1, 3j, 5, 7j, 9], False)
src1 = blocks.vector_source_c([0, 2j, 4, 6j, 8], False)
adder = add_2_fc32_1_fc32()
sink = blocks.vector_sink_c()
tb.connect((src0, 0), (adder, 0))
tb.connect((src1, 0), (adder, 1))
tb.connect(adder, sink)
tb.run()
self.assertEqual(sink.data(), [1, 5j, 9, 13j, 17])
def test_convolve(self):
tb = gr.top_block()
src = blocks.vector_source_f([1, 2, 3, 4, 5, 6, 7, 8], False)
cv = convolve()
sink = blocks.vector_sink_f()
tb.connect(src, cv, sink)
tb.run()
self.assertEqual(sink.data(), [1, 2, 3, 4, 5, 6, 7, 8])
def test_decim2x(self):
tb = gr.top_block()
src = blocks.vector_source_f([1, 2, 3, 4, 5, 6, 7, 8], False)
d2x = decim2x()
sink = blocks.vector_sink_f()
tb.connect(src, d2x, sink)
tb.run()
self.assertEqual(sink.data(), [1, 3, 5, 7])
def test_interp2x(self):
tb = gr.top_block()
src = blocks.vector_source_f([1, 3, 5, 7, 9], False)
i2x = interp2x()
sink = blocks.vector_sink_f()
tb.connect(src, i2x, sink)
tb.run()
self.assertEqual(sink.data(), [1, 1, 3, 3, 5, 5, 7, 7, 9, 9])
def test_tags(self):
src = tag_source()
sink = tag_sink()
head = blocks.head(gr.sizeof_float, 50000) #should be enough items to get a tag through
tb = gr.top_block()
tb.connect(src, head, sink)
tb.run()
self.assertEqual(sink.key, "example_key")
def test_tags_win(self):
src = tag_source()
sink = tag_sink_win()
head = blocks.head(gr.sizeof_float, 50000) #should be enough items to get a tag through
tb = gr.top_block()
tb.connect(src, head, sink)
tb.run()
self.assertEqual(sink.key, "example_key")
def test_fc32_to_f32_2(self):
tb = gr.top_block()
src = blocks.vector_source_c([1+2j, 3+4j, 5+6j, 7+8j, 9+10j], False)
convert = fc32_to_f32_2()
v2s = vector_to_stream(numpy.float32, 2)
sink = blocks.vector_sink_f()
tb.connect(src, convert, v2s, sink)
tb.run()
self.assertEqual(sink.data(), [1, 2, 3, 4, 5, 6, 7, 8, 9, 10])
def test_non_sync_block(self):
tb = gr.top_block ()
src = blocks.vector_source_f(range(1000000))
sinks = [blocks.vector_sink_f(), blocks.vector_sink_f()]
dut = non_sync_block()
tb.connect(src, dut)
tb.connect((dut,0), sinks[0])
tb.connect((dut,1), sinks[1])
tb.run ()
self.assertEqual(len(sinks[0].data()), 2*len(sinks[1].data()))
if __name__ == '__main__':
gr_unittest.run(test_block_gateway, "test_block_gateway.xml")
| gpl-3.0 | -8,985,375,428,202,301,000 | 30.39661 | 95 | 0.536061 | false |
cloudbau/nova | nova/tests/api/openstack/compute/test_images.py | 9 | 40740 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2010 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Tests of the new image services, both as a service layer,
and as a WSGI layer
"""
import copy
import urlparse
from lxml import etree
import webob
from nova.api.openstack.compute import images
from nova.api.openstack.compute.views import images as images_view
from nova.api.openstack import xmlutil
from nova import exception
from nova.image import glance
from nova import test
from nova.tests.api.openstack import fakes
from nova.tests import matchers
NS = "{http://docs.openstack.org/compute/api/v1.1}"
ATOMNS = "{http://www.w3.org/2005/Atom}"
NOW_API_FORMAT = "2010-10-11T10:30:22Z"
class ImagesControllerTest(test.NoDBTestCase):
"""
Test of the OpenStack API /images application controller w/Glance.
"""
def setUp(self):
"""Run before each test."""
super(ImagesControllerTest, self).setUp()
fakes.stub_out_networking(self.stubs)
fakes.stub_out_rate_limiting(self.stubs)
fakes.stub_out_key_pair_funcs(self.stubs)
fakes.stub_out_compute_api_snapshot(self.stubs)
fakes.stub_out_compute_api_backup(self.stubs)
fakes.stub_out_glance(self.stubs)
self.controller = images.Controller()
self.uuid = 'fa95aaf5-ab3b-4cd8-88c0-2be7dd051aaf'
self.url = '/v2/fake/images/detail?server=' + self.uuid
self.server_uuid = "aa640691-d1a7-4a67-9d3c-d35ee6b3cc74"
self.server_href = (
"http://localhost/v2/fake/servers/" + self.server_uuid)
self.server_bookmark = (
"http://localhost/fake/servers/" + self.server_uuid)
self.alternate = "%s/fake/images/%s"
self.fake_req = fakes.HTTPRequest.blank('/v2/fake/images/123')
self.actual_image = self.controller.show(self.fake_req, '124')
self.expected_image_123 = {
"image": {'id': '123',
'name': 'public image',
'metadata': {'key1': 'value1'},
'updated': NOW_API_FORMAT,
'created': NOW_API_FORMAT,
'status': 'ACTIVE',
'minDisk': 10,
'progress': 100,
'minRam': 128,
"links": [{
"rel": "self",
"href":
"http://localhost/v2/fake/images/123",
},
{
"rel": "bookmark",
"href":
"http://localhost/fake/images/123",
},
{
"rel": "alternate",
"type": "application/vnd.openstack.image",
"href": self.alternate %
(glance.generate_glance_url(),
123),
}],
},
}
self.expected_image_124 = {
"image": {'id': '124',
'name': 'queued snapshot',
'metadata': {
u'instance_uuid': self.server_uuid,
u'user_id': u'fake',
},
'updated': NOW_API_FORMAT,
'created': NOW_API_FORMAT,
'status': 'SAVING',
'progress': 25,
'minDisk': 0,
'minRam': 0,
'server': {
'id': self.server_uuid,
"links": [{
"rel": "self",
"href": self.server_href,
},
{
"rel": "bookmark",
"href": self.server_bookmark,
}],
},
"links": [{
"rel": "self",
"href":
"http://localhost/v2/fake/images/124",
},
{
"rel": "bookmark",
"href":
"http://localhost/fake/images/124",
},
{
"rel": "alternate",
"type":
"application/vnd.openstack.image",
"href": self.alternate %
(glance.generate_glance_url(),
124),
}],
},
}
self.image_service = self.mox.CreateMockAnything()
def test_get_image(self):
self.assertThat(self.actual_image,
matchers.DictMatches(self.expected_image_124))
def test_get_image_with_custom_prefix(self):
self.flags(osapi_compute_link_prefix='https://zoo.com:42',
osapi_glance_link_prefix='http://circus.com:34')
fake_req = fakes.HTTPRequest.blank('/v2/fake/images/123')
actual_image = self.controller.show(fake_req, '124')
expected_image = self.expected_image_124
expected_image["image"]["links"][0]["href"] = (
"https://zoo.com:42/v2/fake/images/124")
expected_image["image"]["links"][1]["href"] = (
"https://zoo.com:42/fake/images/124")
expected_image["image"]["links"][2]["href"] = (
"http://circus.com:34/fake/images/124")
expected_image["image"]["server"]["links"][0]["href"] = (
"https://zoo.com:42/v2/fake/servers/" + self.server_uuid)
expected_image["image"]["server"]["links"][1]["href"] = (
"https://zoo.com:42/fake/servers/" + self.server_uuid)
self.assertThat(actual_image, matchers.DictMatches(expected_image))
def test_get_image_404(self):
fake_req = fakes.HTTPRequest.blank('/v2/fake/images/unknown')
self.assertRaises(webob.exc.HTTPNotFound,
self.controller.show, fake_req, 'unknown')
def test_get_image_details(self):
request = fakes.HTTPRequest.blank('/v2/fake/images/detail')
response = self.controller.detail(request)
response_list = response["images"]
image_125 = copy.deepcopy(self.expected_image_124["image"])
image_125['id'] = '125'
image_125['name'] = 'saving snapshot'
image_125['progress'] = 50
image_125["links"][0]["href"] = "http://localhost/v2/fake/images/125"
image_125["links"][1]["href"] = "http://localhost/fake/images/125"
image_125["links"][2]["href"] = (
"%s/fake/images/125" % glance.generate_glance_url())
image_126 = copy.deepcopy(self.expected_image_124["image"])
image_126['id'] = '126'
image_126['name'] = 'active snapshot'
image_126['status'] = 'ACTIVE'
image_126['progress'] = 100
image_126["links"][0]["href"] = "http://localhost/v2/fake/images/126"
image_126["links"][1]["href"] = "http://localhost/fake/images/126"
image_126["links"][2]["href"] = (
"%s/fake/images/126" % glance.generate_glance_url())
image_127 = copy.deepcopy(self.expected_image_124["image"])
image_127['id'] = '127'
image_127['name'] = 'killed snapshot'
image_127['status'] = 'ERROR'
image_127['progress'] = 0
image_127["links"][0]["href"] = "http://localhost/v2/fake/images/127"
image_127["links"][1]["href"] = "http://localhost/fake/images/127"
image_127["links"][2]["href"] = (
"%s/fake/images/127" % glance.generate_glance_url())
image_128 = copy.deepcopy(self.expected_image_124["image"])
image_128['id'] = '128'
image_128['name'] = 'deleted snapshot'
image_128['status'] = 'DELETED'
image_128['progress'] = 0
image_128["links"][0]["href"] = "http://localhost/v2/fake/images/128"
image_128["links"][1]["href"] = "http://localhost/fake/images/128"
image_128["links"][2]["href"] = (
"%s/fake/images/128" % glance.generate_glance_url())
image_129 = copy.deepcopy(self.expected_image_124["image"])
image_129['id'] = '129'
image_129['name'] = 'pending_delete snapshot'
image_129['status'] = 'DELETED'
image_129['progress'] = 0
image_129["links"][0]["href"] = "http://localhost/v2/fake/images/129"
image_129["links"][1]["href"] = "http://localhost/fake/images/129"
image_129["links"][2]["href"] = (
"%s/fake/images/129" % glance.generate_glance_url())
image_130 = copy.deepcopy(self.expected_image_123["image"])
image_130['id'] = '130'
image_130['name'] = None
image_130['metadata'] = {}
image_130['minDisk'] = 0
image_130['minRam'] = 0
image_130["links"][0]["href"] = "http://localhost/v2/fake/images/130"
image_130["links"][1]["href"] = "http://localhost/fake/images/130"
image_130["links"][2]["href"] = (
"%s/fake/images/130" % glance.generate_glance_url())
image_131 = copy.deepcopy(self.expected_image_123["image"])
image_131['id'] = '131'
image_131['name'] = None
image_131['metadata'] = {}
image_131['minDisk'] = 0
image_131['minRam'] = 0
image_131["links"][0]["href"] = "http://localhost/v2/fake/images/131"
image_131["links"][1]["href"] = "http://localhost/fake/images/131"
image_131["links"][2]["href"] = (
"%s/fake/images/131" % glance.generate_glance_url())
expected = [self.expected_image_123["image"],
self.expected_image_124["image"],
image_125, image_126, image_127,
image_128, image_129, image_130,
image_131]
self.assertThat(expected, matchers.DictListMatches(response_list))
def test_get_image_details_with_limit(self):
request = fakes.HTTPRequest.blank('/v2/fake/images/detail?limit=2')
response = self.controller.detail(request)
response_list = response["images"]
response_links = response["images_links"]
expected = [self.expected_image_123["image"],
self.expected_image_124["image"]]
self.assertThat(expected, matchers.DictListMatches(response_list))
href_parts = urlparse.urlparse(response_links[0]['href'])
self.assertEqual('/v2/fake/images', href_parts.path)
params = urlparse.parse_qs(href_parts.query)
self.assertThat({'limit': ['2'], 'marker': ['124']},
matchers.DictMatches(params))
def test_get_image_details_with_limit_and_page_size(self):
request = fakes.HTTPRequest.blank(
'/v2/fake/images/detail?limit=2&page_size=1')
response = self.controller.detail(request)
response_list = response["images"]
response_links = response["images_links"]
expected = [self.expected_image_123["image"],
self.expected_image_124["image"]]
self.assertThat(expected, matchers.DictListMatches(response_list))
href_parts = urlparse.urlparse(response_links[0]['href'])
self.assertEqual('/v2/fake/images', href_parts.path)
params = urlparse.parse_qs(href_parts.query)
self.assertThat({'limit': ['2'], 'page_size': ['1'],
'marker': ['124']}, matchers.DictMatches(params))
def _detail_request(self, filters, request):
context = request.environ['nova.context']
self.image_service.detail(context, filters=filters).AndReturn([])
self.mox.ReplayAll()
controller = images.Controller(image_service=self.image_service)
controller.detail(request)
def test_image_detail_filter_with_name(self):
filters = {'name': 'testname'}
request = fakes.HTTPRequest.blank('/v2/fake/images/detail'
'?name=testname')
self._detail_request(filters, request)
def test_image_detail_filter_with_status(self):
filters = {'status': 'active'}
request = fakes.HTTPRequest.blank('/v2/fake/images/detail'
'?status=ACTIVE')
self._detail_request(filters, request)
def test_image_detail_filter_with_property(self):
filters = {'property-test': '3'}
request = fakes.HTTPRequest.blank('/v2/fake/images/detail'
'?property-test=3')
self._detail_request(filters, request)
def test_image_detail_filter_server_href(self):
ref = 'http://localhost:8774/servers/' + self.uuid
filters = {'property-instance_uuid': self.uuid}
request = fakes.HTTPRequest.blank(self.url)
self._detail_request(filters, request)
def test_image_detail_filter_server_uuid(self):
filters = {'property-instance_uuid': self.uuid}
request = fakes.HTTPRequest.blank(self.url)
self._detail_request(filters, request)
def test_image_detail_filter_changes_since(self):
filters = {'changes-since': '2011-01-24T17:08Z'}
request = fakes.HTTPRequest.blank('/v2/fake/images/detail'
'?changes-since=2011-01-24T17:08Z')
self._detail_request(filters, request)
def test_image_detail_filter_with_type(self):
filters = {'property-image_type': 'BASE'}
request = fakes.HTTPRequest.blank('/v2/fake/images/detail?type=BASE')
self._detail_request(filters, request)
def test_image_detail_filter_not_supported(self):
filters = {'status': 'active'}
request = fakes.HTTPRequest.blank('/v2/fake/images/detail?status='
'ACTIVE&UNSUPPORTEDFILTER=testname')
self._detail_request(filters, request)
def test_image_detail_no_filters(self):
filters = {}
request = fakes.HTTPRequest.blank('/v2/fake/images/detail')
self._detail_request(filters, request)
def test_image_detail_invalid_marker(self):
class InvalidImageService(object):
def detail(self, *args, **kwargs):
raise exception.Invalid('meow')
request = fakes.HTTPRequest.blank('/v2/images?marker=invalid')
controller = images.Controller(image_service=InvalidImageService())
self.assertRaises(webob.exc.HTTPBadRequest, controller.detail, request)
def test_generate_alternate_link(self):
view = images_view.ViewBuilder()
request = fakes.HTTPRequest.blank('/v2/fake/images/1')
generated_url = view._get_alternate_link(request, 1)
actual_url = "%s/fake/images/1" % glance.generate_glance_url()
self.assertEqual(generated_url, actual_url)
def test_delete_image(self):
request = fakes.HTTPRequest.blank('/v2/fake/images/124')
request.method = 'DELETE'
response = self.controller.delete(request, '124')
self.assertEqual(response.status_int, 204)
def test_delete_deleted_image(self):
"""If you try to delete a deleted image, you get back 403 Forbidden."""
deleted_image_id = 128
# see nova.tests.api.openstack.fakes:_make_image_fixtures
request = fakes.HTTPRequest.blank(
'/v2/fake/images/%s' % deleted_image_id)
request.method = 'DELETE'
self.assertRaises(webob.exc.HTTPForbidden, self.controller.delete,
request, '%s' % deleted_image_id)
def test_delete_image_not_found(self):
request = fakes.HTTPRequest.blank('/v2/fake/images/300')
request.method = 'DELETE'
self.assertRaises(webob.exc.HTTPNotFound,
self.controller.delete, request, '300')
class ImageXMLSerializationTest(test.NoDBTestCase):
TIMESTAMP = "2010-10-11T10:30:22Z"
SERVER_UUID = 'aa640691-d1a7-4a67-9d3c-d35ee6b3cc74'
SERVER_HREF = 'http://localhost/v2/fake/servers/' + SERVER_UUID
SERVER_BOOKMARK = 'http://localhost/fake/servers/' + SERVER_UUID
IMAGE_HREF = 'http://localhost/v2/fake/images/%s'
IMAGE_NEXT = 'http://localhost/v2/fake/images?limit=%s&marker=%s'
IMAGE_BOOKMARK = 'http://localhost/fake/images/%s'
def test_xml_declaration(self):
serializer = images.ImageTemplate()
fixture = {
'image': {
'id': 1,
'name': 'Image1',
'created': self.TIMESTAMP,
'updated': self.TIMESTAMP,
'status': 'ACTIVE',
'progress': 80,
'server': {
'id': self.SERVER_UUID,
'links': [
{
'href': self.SERVER_HREF,
'rel': 'self',
},
{
'href': self.SERVER_BOOKMARK,
'rel': 'bookmark',
},
],
},
'metadata': {
'key1': 'value1',
},
'links': [
{
'href': self.IMAGE_HREF % 1,
'rel': 'self',
},
{
'href': self.IMAGE_BOOKMARK % 1,
'rel': 'bookmark',
},
],
},
}
output = serializer.serialize(fixture)
has_dec = output.startswith("<?xml version='1.0' encoding='UTF-8'?>")
self.assertTrue(has_dec)
def test_show(self):
serializer = images.ImageTemplate()
fixture = {
'image': {
'id': 1,
'name': 'Image1',
'created': self.TIMESTAMP,
'updated': self.TIMESTAMP,
'status': 'ACTIVE',
'progress': 80,
'minRam': 10,
'minDisk': 100,
'server': {
'id': self.SERVER_UUID,
'links': [
{
'href': self.SERVER_HREF,
'rel': 'self',
},
{
'href': self.SERVER_BOOKMARK,
'rel': 'bookmark',
},
],
},
'metadata': {
'key1': 'value1',
},
'links': [
{
'href': self.IMAGE_HREF % 1,
'rel': 'self',
},
{
'href': self.IMAGE_BOOKMARK % 1,
'rel': 'bookmark',
},
],
},
}
output = serializer.serialize(fixture)
root = etree.XML(output)
xmlutil.validate_schema(root, 'image')
image_dict = fixture['image']
for key in ['name', 'id', 'updated', 'created', 'status', 'progress']:
self.assertEqual(root.get(key), str(image_dict[key]))
link_nodes = root.findall('{0}link'.format(ATOMNS))
self.assertEqual(len(link_nodes), 2)
for i, link in enumerate(image_dict['links']):
for key, value in link.items():
self.assertEqual(link_nodes[i].get(key), value)
metadata_root = root.find('{0}metadata'.format(NS))
metadata_elems = metadata_root.findall('{0}meta'.format(NS))
self.assertEqual(len(metadata_elems), 1)
for i, metadata_elem in enumerate(metadata_elems):
(meta_key, meta_value) = image_dict['metadata'].items()[i]
self.assertEqual(str(metadata_elem.get('key')), str(meta_key))
self.assertEqual(str(metadata_elem.text).strip(), str(meta_value))
server_root = root.find('{0}server'.format(NS))
self.assertEqual(server_root.get('id'), image_dict['server']['id'])
link_nodes = server_root.findall('{0}link'.format(ATOMNS))
self.assertEqual(len(link_nodes), 2)
for i, link in enumerate(image_dict['server']['links']):
for key, value in link.items():
self.assertEqual(link_nodes[i].get(key), value)
def test_show_zero_metadata(self):
serializer = images.ImageTemplate()
fixture = {
'image': {
'id': 1,
'name': 'Image1',
'created': self.TIMESTAMP,
'updated': self.TIMESTAMP,
'status': 'ACTIVE',
'server': {
'id': self.SERVER_UUID,
'links': [
{
'href': self.SERVER_HREF,
'rel': 'self',
},
{
'href': self.SERVER_BOOKMARK,
'rel': 'bookmark',
},
],
},
'metadata': {},
'links': [
{
'href': self.IMAGE_HREF % 1,
'rel': 'self',
},
{
'href': self.IMAGE_BOOKMARK % 1,
'rel': 'bookmark',
},
],
},
}
output = serializer.serialize(fixture)
root = etree.XML(output)
xmlutil.validate_schema(root, 'image')
image_dict = fixture['image']
for key in ['name', 'id', 'updated', 'created', 'status']:
self.assertEqual(root.get(key), str(image_dict[key]))
link_nodes = root.findall('{0}link'.format(ATOMNS))
self.assertEqual(len(link_nodes), 2)
for i, link in enumerate(image_dict['links']):
for key, value in link.items():
self.assertEqual(link_nodes[i].get(key), value)
meta_nodes = root.findall('{0}meta'.format(ATOMNS))
self.assertEqual(len(meta_nodes), 0)
server_root = root.find('{0}server'.format(NS))
self.assertEqual(server_root.get('id'), image_dict['server']['id'])
link_nodes = server_root.findall('{0}link'.format(ATOMNS))
self.assertEqual(len(link_nodes), 2)
for i, link in enumerate(image_dict['server']['links']):
for key, value in link.items():
self.assertEqual(link_nodes[i].get(key), value)
def test_show_image_no_metadata_key(self):
serializer = images.ImageTemplate()
fixture = {
'image': {
'id': 1,
'name': 'Image1',
'created': self.TIMESTAMP,
'updated': self.TIMESTAMP,
'status': 'ACTIVE',
'server': {
'id': self.SERVER_UUID,
'links': [
{
'href': self.SERVER_HREF,
'rel': 'self',
},
{
'href': self.SERVER_BOOKMARK,
'rel': 'bookmark',
},
],
},
'links': [
{
'href': self.IMAGE_HREF % 1,
'rel': 'self',
},
{
'href': self.IMAGE_BOOKMARK % 1,
'rel': 'bookmark',
},
],
},
}
output = serializer.serialize(fixture)
root = etree.XML(output)
xmlutil.validate_schema(root, 'image')
image_dict = fixture['image']
for key in ['name', 'id', 'updated', 'created', 'status']:
self.assertEqual(root.get(key), str(image_dict[key]))
link_nodes = root.findall('{0}link'.format(ATOMNS))
self.assertEqual(len(link_nodes), 2)
for i, link in enumerate(image_dict['links']):
for key, value in link.items():
self.assertEqual(link_nodes[i].get(key), value)
meta_nodes = root.findall('{0}meta'.format(ATOMNS))
self.assertEqual(len(meta_nodes), 0)
server_root = root.find('{0}server'.format(NS))
self.assertEqual(server_root.get('id'), image_dict['server']['id'])
link_nodes = server_root.findall('{0}link'.format(ATOMNS))
self.assertEqual(len(link_nodes), 2)
for i, link in enumerate(image_dict['server']['links']):
for key, value in link.items():
self.assertEqual(link_nodes[i].get(key), value)
def test_show_no_server(self):
serializer = images.ImageTemplate()
fixture = {
'image': {
'id': 1,
'name': 'Image1',
'created': self.TIMESTAMP,
'updated': self.TIMESTAMP,
'status': 'ACTIVE',
'metadata': {
'key1': 'value1',
},
'links': [
{
'href': self.IMAGE_HREF % 1,
'rel': 'self',
},
{
'href': self.IMAGE_BOOKMARK % 1,
'rel': 'bookmark',
},
],
},
}
output = serializer.serialize(fixture)
root = etree.XML(output)
xmlutil.validate_schema(root, 'image')
image_dict = fixture['image']
for key in ['name', 'id', 'updated', 'created', 'status']:
self.assertEqual(root.get(key), str(image_dict[key]))
link_nodes = root.findall('{0}link'.format(ATOMNS))
self.assertEqual(len(link_nodes), 2)
for i, link in enumerate(image_dict['links']):
for key, value in link.items():
self.assertEqual(link_nodes[i].get(key), value)
metadata_root = root.find('{0}metadata'.format(NS))
metadata_elems = metadata_root.findall('{0}meta'.format(NS))
self.assertEqual(len(metadata_elems), 1)
for i, metadata_elem in enumerate(metadata_elems):
(meta_key, meta_value) = image_dict['metadata'].items()[i]
self.assertEqual(str(metadata_elem.get('key')), str(meta_key))
self.assertEqual(str(metadata_elem.text).strip(), str(meta_value))
server_root = root.find('{0}server'.format(NS))
self.assertEqual(server_root, None)
def test_show_with_min_ram(self):
serializer = images.ImageTemplate()
fixture = {
'image': {
'id': 1,
'name': 'Image1',
'created': self.TIMESTAMP,
'updated': self.TIMESTAMP,
'status': 'ACTIVE',
'progress': 80,
'minRam': 256,
'server': {
'id': self.SERVER_UUID,
'links': [
{
'href': self.SERVER_HREF,
'rel': 'self',
},
{
'href': self.SERVER_BOOKMARK,
'rel': 'bookmark',
},
],
},
'metadata': {
'key1': 'value1',
},
'links': [
{
'href': self.IMAGE_HREF % 1,
'rel': 'self',
},
{
'href': self.IMAGE_BOOKMARK % 1,
'rel': 'bookmark',
},
],
},
}
output = serializer.serialize(fixture)
root = etree.XML(output)
xmlutil.validate_schema(root, 'image')
image_dict = fixture['image']
for key in ['name', 'id', 'updated', 'created', 'status', 'progress',
'minRam']:
self.assertEqual(root.get(key), str(image_dict[key]))
link_nodes = root.findall('{0}link'.format(ATOMNS))
self.assertEqual(len(link_nodes), 2)
for i, link in enumerate(image_dict['links']):
for key, value in link.items():
self.assertEqual(link_nodes[i].get(key), value)
metadata_root = root.find('{0}metadata'.format(NS))
metadata_elems = metadata_root.findall('{0}meta'.format(NS))
self.assertEqual(len(metadata_elems), 1)
for i, metadata_elem in enumerate(metadata_elems):
(meta_key, meta_value) = image_dict['metadata'].items()[i]
self.assertEqual(str(metadata_elem.get('key')), str(meta_key))
self.assertEqual(str(metadata_elem.text).strip(), str(meta_value))
server_root = root.find('{0}server'.format(NS))
self.assertEqual(server_root.get('id'), image_dict['server']['id'])
link_nodes = server_root.findall('{0}link'.format(ATOMNS))
self.assertEqual(len(link_nodes), 2)
for i, link in enumerate(image_dict['server']['links']):
for key, value in link.items():
self.assertEqual(link_nodes[i].get(key), value)
def test_show_with_min_disk(self):
serializer = images.ImageTemplate()
fixture = {
'image': {
'id': 1,
'name': 'Image1',
'created': self.TIMESTAMP,
'updated': self.TIMESTAMP,
'status': 'ACTIVE',
'progress': 80,
'minDisk': 5,
'server': {
'id': self.SERVER_UUID,
'links': [
{
'href': self.SERVER_HREF,
'rel': 'self',
},
{
'href': self.SERVER_BOOKMARK,
'rel': 'bookmark',
},
],
},
'metadata': {
'key1': 'value1',
},
'links': [
{
'href': self.IMAGE_HREF % 1,
'rel': 'self',
},
{
'href': self.IMAGE_BOOKMARK % 1,
'rel': 'bookmark',
},
],
},
}
output = serializer.serialize(fixture)
root = etree.XML(output)
xmlutil.validate_schema(root, 'image')
image_dict = fixture['image']
for key in ['name', 'id', 'updated', 'created', 'status', 'progress',
'minDisk']:
self.assertEqual(root.get(key), str(image_dict[key]))
link_nodes = root.findall('{0}link'.format(ATOMNS))
self.assertEqual(len(link_nodes), 2)
for i, link in enumerate(image_dict['links']):
for key, value in link.items():
self.assertEqual(link_nodes[i].get(key), value)
metadata_root = root.find('{0}metadata'.format(NS))
metadata_elems = metadata_root.findall('{0}meta'.format(NS))
self.assertEqual(len(metadata_elems), 1)
for i, metadata_elem in enumerate(metadata_elems):
(meta_key, meta_value) = image_dict['metadata'].items()[i]
self.assertEqual(str(metadata_elem.get('key')), str(meta_key))
self.assertEqual(str(metadata_elem.text).strip(), str(meta_value))
server_root = root.find('{0}server'.format(NS))
self.assertEqual(server_root.get('id'), image_dict['server']['id'])
link_nodes = server_root.findall('{0}link'.format(ATOMNS))
self.assertEqual(len(link_nodes), 2)
for i, link in enumerate(image_dict['server']['links']):
for key, value in link.items():
self.assertEqual(link_nodes[i].get(key), value)
def test_index(self):
serializer = images.MinimalImagesTemplate()
fixture = {
'images': [
{
'id': 1,
'name': 'Image1',
'links': [
{
'href': self.IMAGE_HREF % 1,
'rel': 'self',
},
{
'href': self.IMAGE_BOOKMARK % 1,
'rel': 'bookmark',
},
],
},
{
'id': 2,
'name': 'Image2',
'links': [
{
'href': self.IMAGE_HREF % 2,
'rel': 'self',
},
{
'href': self.IMAGE_BOOKMARK % 2,
'rel': 'bookmark',
},
],
},
]
}
output = serializer.serialize(fixture)
root = etree.XML(output)
xmlutil.validate_schema(root, 'images_index')
image_elems = root.findall('{0}image'.format(NS))
self.assertEqual(len(image_elems), 2)
for i, image_elem in enumerate(image_elems):
image_dict = fixture['images'][i]
for key in ['name', 'id']:
self.assertEqual(image_elem.get(key), str(image_dict[key]))
link_nodes = image_elem.findall('{0}link'.format(ATOMNS))
self.assertEqual(len(link_nodes), 2)
for i, link in enumerate(image_dict['links']):
for key, value in link.items():
self.assertEqual(link_nodes[i].get(key), value)
def test_index_with_links(self):
serializer = images.MinimalImagesTemplate()
fixture = {
'images': [
{
'id': 1,
'name': 'Image1',
'links': [
{
'href': self.IMAGE_HREF % 1,
'rel': 'self',
},
{
'href': self.IMAGE_BOOKMARK % 1,
'rel': 'bookmark',
},
],
},
{
'id': 2,
'name': 'Image2',
'links': [
{
'href': self.IMAGE_HREF % 2,
'rel': 'self',
},
{
'href': self.IMAGE_BOOKMARK % 2,
'rel': 'bookmark',
},
],
},
],
'images_links': [
{
'rel': 'next',
'href': self.IMAGE_NEXT % (2, 2),
}
],
}
output = serializer.serialize(fixture)
root = etree.XML(output)
xmlutil.validate_schema(root, 'images_index')
image_elems = root.findall('{0}image'.format(NS))
self.assertEqual(len(image_elems), 2)
for i, image_elem in enumerate(image_elems):
image_dict = fixture['images'][i]
for key in ['name', 'id']:
self.assertEqual(image_elem.get(key), str(image_dict[key]))
link_nodes = image_elem.findall('{0}link'.format(ATOMNS))
self.assertEqual(len(link_nodes), 2)
for i, link in enumerate(image_dict['links']):
for key, value in link.items():
self.assertEqual(link_nodes[i].get(key), value)
# Check images_links
images_links = root.findall('{0}link'.format(ATOMNS))
for i, link in enumerate(fixture['images_links']):
for key, value in link.items():
self.assertEqual(images_links[i].get(key), value)
def test_index_zero_images(self):
serializer = images.MinimalImagesTemplate()
fixtures = {
'images': [],
}
output = serializer.serialize(fixtures)
root = etree.XML(output)
xmlutil.validate_schema(root, 'images_index')
image_elems = root.findall('{0}image'.format(NS))
self.assertEqual(len(image_elems), 0)
def test_detail(self):
serializer = images.ImagesTemplate()
fixture = {
'images': [
{
'id': 1,
'name': 'Image1',
'created': self.TIMESTAMP,
'updated': self.TIMESTAMP,
'status': 'ACTIVE',
'server': {
'id': self.SERVER_UUID,
'links': [
{
'href': self.SERVER_HREF,
'rel': 'self',
},
{
'href': self.SERVER_BOOKMARK,
'rel': 'bookmark',
},
],
},
'links': [
{
'href': self.IMAGE_HREF % 1,
'rel': 'self',
},
{
'href': self.IMAGE_BOOKMARK % 1,
'rel': 'bookmark',
},
],
},
{
'id': '2',
'name': 'Image2',
'created': self.TIMESTAMP,
'updated': self.TIMESTAMP,
'status': 'SAVING',
'progress': 80,
'metadata': {
'key1': 'value1',
},
'links': [
{
'href': self.IMAGE_HREF % 2,
'rel': 'self',
},
{
'href': self.IMAGE_BOOKMARK % 2,
'rel': 'bookmark',
},
],
},
]
}
output = serializer.serialize(fixture)
root = etree.XML(output)
xmlutil.validate_schema(root, 'images')
image_elems = root.findall('{0}image'.format(NS))
self.assertEqual(len(image_elems), 2)
for i, image_elem in enumerate(image_elems):
image_dict = fixture['images'][i]
for key in ['name', 'id', 'updated', 'created', 'status']:
self.assertEqual(image_elem.get(key), str(image_dict[key]))
link_nodes = image_elem.findall('{0}link'.format(ATOMNS))
self.assertEqual(len(link_nodes), 2)
for i, link in enumerate(image_dict['links']):
for key, value in link.items():
self.assertEqual(link_nodes[i].get(key), value)
| apache-2.0 | -1,888,554,433,275,398,700 | 37.985646 | 79 | 0.460972 | false |
JAOSP/aosp_platform_external_chromium_org | tools/code_coverage/process_coverage.py | 51 | 14310 | #!/usr/bin/env python
# Copyright (c) 2011 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Script to clean the lcov files and convert it to HTML
TODO(niranjan): Add usage information here
"""
import optparse
import os
import shutil
import subprocess
import sys
import tempfile
import time
import urllib2
# These are source files that were generated during compile time. We want to
# remove references to these files from the lcov file otherwise genhtml will
# throw an error.
win32_srcs_exclude = ['parse.y',
'xpathgrammar.cpp',
'cssgrammar.cpp',
'csspropertynames.gperf']
# Number of lines of a new coverage data set
# to send at a time to the dashboard.
POST_CHUNK_SIZE = 50
# Number of post request failures to allow before exiting.
MAX_FAILURES = 5
def CleanPathNames(dir):
"""Clean the pathnames of the HTML generated by genhtml.
This method is required only for code coverage on Win32. Due to a known issue
with reading from CIFS shares mounted on Linux, genhtml appends a ^M to every
file name it reads from the Windows share, causing corrupt filenames in
genhtml's output folder.
Args:
dir: Output folder of the genhtml output.
Returns:
None
"""
  # Strip off the ^M characters that get appended to the file name
for dirpath, dirname, filenames in os.walk(dir):
for file in filenames:
file_clean = file.replace('\r', '')
if file_clean != file:
        os.rename(os.path.join(dirpath, file),
                  os.path.join(dirpath, file_clean))
def GenerateHtml(lcov_path, dash_root):
"""Runs genhtml to convert lcov data to human readable HTML.
This script expects the LCOV file name to be in the format:
chrome_<platform>_<revision#>.lcov.
This method parses the file name and then sets up the correct folder
hierarchy for the coverage data and then runs genhtml to get the actual HTML
formatted coverage data.
Args:
lcov_path: Path of the lcov data file.
dash_root: Root location of the dashboard.
Returns:
    Code coverage percentage on success.
None on failure.
"""
# Parse the LCOV file name.
filename = os.path.basename(lcov_path).split('.')[0]
buffer = filename.split('_')
dash_root = dash_root.rstrip('/') # Remove trailing '/'
# Set up correct folder hierarchy in the dashboard root
# TODO(niranjan): Check the formatting using a regexp
if len(buffer) >= 3: # Check if filename has right formatting
platform = buffer[len(buffer) - 2]
revision = buffer[len(buffer) - 1]
if os.path.exists(os.path.join(dash_root, platform)) == False:
os.mkdir(os.path.join(dash_root, platform))
output_dir = os.path.join(dash_root, platform, revision)
os.mkdir(output_dir)
else:
# TODO(niranjan): Add failure logging here.
return None # File not formatted correctly
# Run genhtml
os.system('/usr/bin/genhtml -o %s %s' % (output_dir, lcov_path))
# TODO(niranjan): Check the exit status of the genhtml command.
# TODO(niranjan): Parse the stdout and return coverage percentage.
CleanPathNames(output_dir)
return 'dummy' # TODO(niranjan): Return actual percentage.
def CleanWin32Lcov(lcov_path, src_root):
"""Cleanup the lcov data generated on Windows.
This method fixes up the paths inside the lcov file from the Win32 specific
paths to the actual paths of the mounted CIFS share. The lcov files generated
on Windows have the following format:
SF:c:\chrome_src\src\skia\sgl\skscan_antihair.cpp
DA:97,0
DA:106,0
DA:107,0
DA:109,0
...
end_of_record
This method changes the source-file (SF) lines to a format compatible with
genhtml on Linux by fixing paths. This method also removes references to
  certain dynamically generated files to be excluded from the code coverage.
Args:
lcov_path: Path of the Win32 lcov file to be cleaned.
src_root: Location of the source and symbols dir.
Returns:
None
"""
strip_flag = False
lcov = open(lcov_path, 'r')
loc_csv_file = open(lcov_path + '.csv', 'w')
(tmpfile_id, tmpfile_name) = tempfile.mkstemp()
tmpfile = open(tmpfile_name, 'w')
src_root = src_root.rstrip('/') # Remove trailing '/'
for line in lcov:
if line.startswith('SF'):
# We want to exclude certain auto-generated files otherwise genhtml will
# fail to convert lcov to HTML.
for exp in win32_srcs_exclude:
if line.rfind(exp) != -1:
strip_flag = True # Indicates that we want to remove this section
# Now we normalize the paths
# e.g. Change SF:c:\foo\src\... to SF:/chrome_src/...
parse_buffer = line.split(':')
buffer = '%s:%s%s' % (parse_buffer[0],
src_root,
parse_buffer[2])
buffer = buffer.replace('\\', '/')
line = buffer.replace('\r', '')
# We want an accurate count of the lines of code in a given file so that
      # we can estimate the code coverage percentage accurately. We use a
# third party script cloc.pl which gives that count and then just parse
# its command line output to filter out the other unnecessary data.
# TODO(niranjan): Find out a better way of doing this.
buffer = buffer.lstrip('SF:')
file_for_loc = buffer.replace('\r\n', '')
# TODO(niranjan): Add a check to see if cloc is present on the machine.
command = ["perl",
"cloc.pl",
file_for_loc]
output = subprocess.Popen(command,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT).communicate()[0]
      if output.rfind('error:') != -1:
return None
tmp_buf1 = output.split('=')
tmp_buf2 = tmp_buf1[len(tmp_buf1) - 2].split('x')[0].split(' ')
loc = tmp_buf2[len(tmp_buf2) - 2]
loc_csv_file.write('%s,%s\r\n' % (file_for_loc, loc))
# Write to the temp file if the section to write is valid
if strip_flag == False:
# Also write this to the 'clean' LCOV file
tmpfile.write('%s' % (line))
# Reset the strip flag
    if line.rstrip().endswith('end_of_record'):
strip_flag = False
# Close the files and replace the lcov file by the 'clean' tmpfile
tmpfile.close()
lcov.close()
loc_csv_file.close()
shutil.move(tmpfile_name, lcov_path)
def ParseCoverageDataForDashboard(lcov_path):
"""Parse code coverage data into coverage results per source node.
Use lcov and linecount data to create a map of source nodes to
corresponding total and tested line counts.
Args:
lcov_path: File path to lcov coverage data.
Returns:
List of strings with comma separated source node and coverage.
"""
results = {}
linecount_path = lcov_path + '.csv'
  assert os.path.exists(linecount_path), (
      'linecount csv does not exist at: %s' % linecount_path)
csv_file = open(linecount_path, 'r')
linecounts = csv_file.readlines()
csv_file.close()
lcov_file = open(lcov_path, 'r')
srcfile_index = 0
for line in lcov_file:
line = line.strip()
# Set the current srcfile name for a new src file declaration.
if line[:len('SF:')] == 'SF:':
instrumented_set = {}
executed_set = {}
srcfile_name = line[len('SF:'):]
# Mark coverage data points hashlist style for the current src file.
if line[:len('DA:')] == 'DA:':
line_info = line[len('DA:'):].split(',')
      assert len(line_info) == 2, 'DA: line format unexpected - %s' % line
(line_num, line_was_executed) = line_info
instrumented_set[line_num] = True
# line_was_executed is '0' or '1'
if int(line_was_executed):
executed_set[line_num] = True
# Update results for the current src file at record end.
if line == 'end_of_record':
instrumented = len(instrumented_set.keys())
executed = len(executed_set.keys())
parent_directory = srcfile_name[:srcfile_name.rfind('/') + 1]
linecount_point = linecounts[srcfile_index].strip().split(',')
      assert len(linecount_point) == 2, (
          'linecount format unexpected - %s' % linecounts[srcfile_index])
(linecount_path, linecount_count) = linecount_point
srcfile_index += 1
# Sanity check that path names in the lcov and linecount are lined up.
if linecount_path[-10:] != srcfile_name[-10:]:
print 'NAME MISMATCH: %s :: %s' % (srcfile_name, linecount_path)
if instrumented > int(linecount_count):
linecount_count = instrumented
# Keep counts the same way that it is done in the genhtml utility.
# Count the coverage of a file towards the file,
# the parent directory, and the source root.
AddResults(results, srcfile_name, int(linecount_count), executed)
AddResults(results, parent_directory, int(linecount_count), executed)
AddResults(results, '/', instrumented, executed)
lcov_file.close()
keys = results.keys()
keys.sort()
# The first key (sorted) will be the base directory '/'
# but its full path may be '/mnt/chrome_src/src/'
# using this offset will ignore the part '/mnt/chrome_src/src'.
# Offset is the last '/' that isn't the last character for the
# first directory name in results (position 1 in keys).
offset = len(keys[1][:keys[1][:-1].rfind('/')])
lines = []
for key in keys:
if len(key) > offset:
node_path = key[offset:]
else:
node_path = key
(total, covered) = results[key]
percent = float(covered) * 100 / total
lines.append('%s,%.2f' % (node_path, percent))
return lines
def AddResults(results, location, lines_total, lines_executed):
"""Add resulting line tallies to a location's total.
Args:
results: Map of node location to corresponding coverage data.
location: Source node string.
lines_total: Number of lines to add to the total count for this node.
lines_executed: Number of lines to add to the executed count for this node.
"""
if results.has_key(location):
(i, e) = results[location]
results[location] = (i + lines_total, e + lines_executed)
else:
results[location] = (lines_total, lines_executed)
def PostResultsToDashboard(lcov_path, results, post_url):
"""Post coverage results to coverage dashboard.
Args:
lcov_path: File path for lcov data in the expected format:
<project>_<platform>_<cl#>.coverage.lcov
results: string list in the appropriate posting format.
"""
project_platform_cl = lcov_path.split('.')[0].split('_')
  assert len(project_platform_cl) == 3, (
      'lcov_path not in expected format: %s' % lcov_path)
(project, platform, cl_string) = project_platform_cl
project_name = '%s-%s' % (project, platform)
url = '%s/newdata.do?project=%s&cl=%s' % (post_url, project_name, cl_string)
# Send POSTs of POST_CHUNK_SIZE lines of the result set until
# there is no more data and last_loop is set to True.
last_loop = False
cur_line = 0
while not last_loop:
body = '\n'.join(results[cur_line:cur_line + POST_CHUNK_SIZE])
cur_line += POST_CHUNK_SIZE
last_loop = (cur_line >= len(results))
req = urllib2.Request('%s&last=%s' % (url, str(last_loop)), body)
req.add_header('Content-Type', 'text/plain')
SendPost(req)
# Global counter for the current number of request failures.
num_fails = 0
def SendPost(req):
"""Execute a post request and retry for up to MAX_FAILURES.
Args:
req: A urllib2 request object.
Raises:
URLError: If urlopen throws after too many retries.
HTTPError: If urlopen throws after too many retries.
"""
global num_fails
try:
urllib2.urlopen(req)
# Reset failure count.
num_fails = 0
except (urllib2.URLError, urllib2.HTTPError):
num_fails += 1
if num_fails < MAX_FAILURES:
print 'fail, retrying (%d)' % num_fails
time.sleep(5)
SendPost(req)
else:
print 'POST request exceeded allowed retries.'
raise
def main():
if not sys.platform.startswith('linux'):
print 'This script is supported only on Linux'
return 0
# Command line parsing
parser = optparse.OptionParser()
parser.add_option('-p',
'--platform',
dest='platform',
default=None,
help=('Platform that the locv file was generated on. Must'
'be one of {win32, linux2, linux3, macosx}'))
parser.add_option('-s',
'--source',
dest='src_dir',
default=None,
help='Path to the source code and symbols')
parser.add_option('-d',
'--dash_root',
dest='dash_root',
default=None,
help='Root directory for the dashboard')
parser.add_option('-l',
'--lcov',
dest='lcov_path',
default=None,
help='Location of the LCOV file to process')
parser.add_option('-u',
'--post_url',
dest='post_url',
default=None,
help='Base URL of the coverage dashboard')
(options, args) = parser.parse_args()
if options.platform == None:
parser.error('Platform not specified')
if options.lcov_path == None:
parser.error('lcov file path not specified')
if options.src_dir == None:
parser.error('Source directory not specified')
if options.dash_root == None:
parser.error('Dashboard root not specified')
if options.post_url == None:
parser.error('Post URL not specified')
if options.platform == 'win32':
CleanWin32Lcov(options.lcov_path, options.src_dir)
percent = GenerateHtml(options.lcov_path, options.dash_root)
if percent == None:
# TODO(niranjan): Add logging.
print 'Failed to generate code coverage'
return 1
else:
# TODO(niranjan): Do something with the code coverage numbers
pass
else:
print 'Unsupported platform'
return 1
# Prep coverage results for dashboard and post new set.
parsed_data = ParseCoverageDataForDashboard(options.lcov_path)
PostResultsToDashboard(options.lcov_path, parsed_data, options.post_url)
return 0
if __name__ == '__main__':
sys.exit(main())
| bsd-3-clause | 4,272,056,129,556,521,000 | 33.64891 | 79 | 0.643606 | false |
pyhedgehog/hook.io-sdk-python | tests/test_cli.py | 1 | 2861 | #!/usr/bin/env python
import os
import sys
import pytest
import logging
import runpy
import hookio.runclient
log = logging.getLogger(__name__)
def test_cli_empty(capsys):
pytest.raises(SystemExit, hookio.runclient.main, ['-'])
out, err = capsys.readouterr()
assert not out
assert 'usage:' in err
assert 'too few arguments' in err
def test_cli_runpy(capsys):
saved_argv = sys.argv[:]
try:
dirname = os.path.dirname(hookio.__file__)
sys.argv[:] = [os.path.join(dirname, 'runclient.py')]
pytest.raises(SystemExit, runpy.run_module, 'hookio.runclient', run_name='__main__')
out, err = capsys.readouterr()
assert not out
assert 'usage:' in err
assert 'too few arguments' in err
modname = 'hookio'
if sys.version_info[:2] <= (2, 6):
modname = 'hookio.__main__'
sys.argv[:] = [os.path.join(dirname, '__main__.py')]
pytest.raises(SystemExit, runpy.run_module, modname, run_name='__main__')
out, err = capsys.readouterr()
assert not out
assert 'usage:' in err
assert 'too few arguments' in err
finally:
sys.argv[:] = saved_argv
def test_cli_help(capsys):
pytest.raises(SystemExit, hookio.runclient.main, ['-', '-h'])
out, err = capsys.readouterr()
assert not err
assert 'usage:' in out
assert 'subcommands:' in out
sub = out.split('subcommands:')[1]
assert ' hook ' in sub
assert ' datastore ' in sub
def test_cli_hook_empty(capsys):
pytest.raises(SystemExit, hookio.runclient.main, ['-', 'hook'])
out, err = capsys.readouterr()
assert not out
out = err.replace('\r', '\n')
assert 'usage:' in out
assert ' hook\n' in out or ' hook [-h]' in out
assert 'too few arguments' in out
def test_cli_hook_help(capsys):
pytest.raises(SystemExit, hookio.runclient.main, ['-', 'hook', '-h'])
out, err = capsys.readouterr()
assert not err
assert 'usage:' in out
assert 'subcommands:' in out
sub = out.split('subcommands:')[1]
assert ' run ' in sub
assert ' create ' in sub
assert ' destroy ' in sub
def test_cli_datastore_empty(capsys):
pytest.raises(SystemExit, hookio.runclient.main, ['-', 'datastore'])
out, err = capsys.readouterr()
assert not out
out = err.replace('\r', '\n')
assert 'usage:' in out
assert ' datastore\n' in out or ' datastore [-h]' in out
assert 'too few arguments' in out
def test_cli_datastore_help(capsys):
pytest.raises(SystemExit, hookio.runclient.main, ['-', 'datastore', '-h'])
out, err = capsys.readouterr()
assert not err
assert 'usage:' in out
assert 'subcommands:' in out
sub = out.split('subcommands:')[1]
assert ' recent ' in sub
assert ' set ' in sub
assert ' get ' in sub
assert ' del ' in sub
| unlicense | -2,773,399,889,693,449,700 | 28.802083 | 92 | 0.616218 | false |
andykimpe/chromium-test-npapi | tools/perf/page_sets/indexeddb_offline.py | 9 | 1972 | # Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import re
# pylint: disable=W0401,W0614
from telemetry.page.actions.all_page_actions import *
from telemetry.page import page as page_module
from telemetry.page import page_set as page_set_module
def _CreateXpathFunction(xpath):
return ('document.evaluate("%s",'
'document,'
'null,'
'XPathResult.FIRST_ORDERED_NODE_TYPE,'
'null)'
'.singleNodeValue' % re.escape(xpath))
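# Helper used below with WaitForElement(element_function=...): it returns a
# JavaScript snippet that evaluates the given XPath (escaped via re.escape
# above) and yields the first matching DOM node through
# XPathResult.FIRST_ORDERED_NODE_TYPE.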
class IndexeddbOfflinePage(page_module.Page):
""" Why: Simulates user input while offline and sync while online. """
def __init__(self, page_set):
super(IndexeddbOfflinePage, self).__init__(
url='file://endure/indexeddb_app.html',
page_set=page_set,
name='indexeddb_offline')
self.user_agent_type = 'desktop'
def RunNavigateSteps(self, action_runner):
action_runner.NavigateToPage(self)
action_runner.WaitForElement(text='initialized')
def RunEndure(self, action_runner):
action_runner.WaitForElement('button[id="online"]:not(disabled)')
action_runner.ClickElement('button[id="online"]:not(disabled)')
action_runner.WaitForElement(
element_function=_CreateXpathFunction('id("state")[text()="online"]'))
action_runner.Wait(1)
action_runner.WaitForElement('button[id="offline"]:not(disabled)')
action_runner.ClickElement('button[id="offline"]:not(disabled)')
action_runner.WaitForElement(
element_function=_CreateXpathFunction('id("state")[text()="offline"]'))
class IndexeddbOfflinePageSet(page_set_module.PageSet):
""" Chrome Endure test for IndexedDB. """
def __init__(self):
super(IndexeddbOfflinePageSet, self).__init__(
user_agent_type='desktop')
self.AddPage(IndexeddbOfflinePage(self))
| bsd-3-clause | 2,658,128,098,395,092,000 | 33.596491 | 79 | 0.670892 | false |
pombredanne/cubes | cubes/mapper.py | 7 | 3872 | # -*- coding: utf-8 -*-
"""Logical to Physical Mappers"""
import collections
from .logging import get_logger
from .errors import *
__all__ = (
"Mapper",
)
class Mapper(object):
"""Mapper is core class for translating logical model to physical database
schema.
"""
# WARNING: do not put any SQL/engine/connection related stuff into this
# class yet. It might be moved to the cubes as one of top-level modules
# and subclassed here.
def __init__(self, cube, locale=None, **naming):
"""Abstract class for mappers which maps logical references to
physical references (tables and columns).
Attributes:
* `cube` - mapped cube
* `fact_name` – fact name, if not specified then `cube.name` is used
* `schema` – default database schema
"""
super(Mapper, self).__init__()
        if cube is None:
raise Exception("Cube for mapper should not be None.")
self.logger = get_logger()
self.cube = cube
self.mappings = self.cube.mappings
self.locale = locale
# TODO: remove this (should be in SQL only)
self._collect_attributes()
def _collect_attributes(self):
"""Collect all cube attributes and create a dictionary where keys are
logical references and values are `cubes.model.Attribute` objects.
This method should be used after each cube or mappings change.
"""
self.attributes = collections.OrderedDict()
for attr in self.cube.all_fact_attributes:
self.attributes[self.logical(attr)] = attr
def set_locale(self, locale):
"""Change the mapper's locale"""
self.locale = locale
self._collect_attributes()
    # TODO: deprecate in favor of Cube.all_attributes
def all_attributes(self, expand_locales=False):
"""Return a list of all attributes of a cube. If `expand_locales` is
``True``, then localized logical reference is returned for each
attribute's locale."""
return self.attributes.values()
    # TODO: deprecate in favor of Cube.attribute
def attribute(self, name):
"""Returns an attribute with logical reference `name`. """
# TODO: If attribute is not found, returns `None` (yes or no?)
return self.attributes[name]
# TODO: is this necessary after removing of 'simplify'? Reconsider
# requirement for existence of this one.
def logical(self, attribute, locale=None):
"""Returns logical reference as string for `attribute` in `dimension`.
If `dimension` is ``Null`` then fact table is assumed. The logical
reference might have following forms:
* ``dimension.attribute`` - dimension attribute
* ``attribute`` - fact measure or detail
If `locale` is specified, then locale is added to the reference. This
is used by backends and other mappers, it has no real use in end-user
browsing.
"""
reference = attribute.localized_ref(locale)
return reference
def split_logical(self, reference):
"""Returns tuple (`dimension`, `attribute`) from `logical_reference` string. Syntax
of the string is: ``dimensions.attribute``."""
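        # For example (illustrative references, not tied to any model):
        # 'date.year' -> ('date', 'year'), 'date.month.name' ->
        # ('date', 'month.name'), and a bare reference 'amount' ->
        # (None, 'amount').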
split = reference.split(".")
if len(split) > 1:
dim_name = split[0]
attr_name = ".".join(split[1:])
return (dim_name, attr_name)
else:
return (None, reference)
def physical(self, attribute, locale=None):
"""Returns physical reference for attribute. Returned value is backend
specific. Default implementation returns a value from the mapping
dictionary.
This method should be implemented by `Mapper` subclasses.
"""
return self.mappings.get(self.logical(attribute, locale))
| mit | 6,382,085,092,239,933,000 | 31.233333 | 91 | 0.631851 | false |
vjpai/grpc | tools/run_tests/artifacts/artifact_targets.py | 4 | 16848 | #!/usr/bin/env python
# Copyright 2016 gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Definition of targets to build artifacts."""
import os.path
import random
import string
import sys
sys.path.insert(0, os.path.abspath('..'))
import python_utils.jobset as jobset
def create_docker_jobspec(name,
dockerfile_dir,
shell_command,
environ={},
flake_retries=0,
timeout_retries=0,
timeout_seconds=30 * 60,
extra_docker_args=None,
verbose_success=False):
"""Creates jobspec for a task running under docker."""
environ = environ.copy()
environ['RUN_COMMAND'] = shell_command
environ['ARTIFACTS_OUT'] = 'artifacts/%s' % name
docker_args = []
for k, v in environ.items():
docker_args += ['-e', '%s=%s' % (k, v)]
docker_env = {
'DOCKERFILE_DIR': dockerfile_dir,
'DOCKER_RUN_SCRIPT': 'tools/run_tests/dockerize/docker_run.sh',
'OUTPUT_DIR': 'artifacts'
}
if extra_docker_args is not None:
docker_env['EXTRA_DOCKER_ARGS'] = extra_docker_args
jobspec = jobset.JobSpec(
cmdline=['tools/run_tests/dockerize/build_and_run_docker.sh'] +
docker_args,
environ=docker_env,
shortname='build_artifact.%s' % (name),
timeout_seconds=timeout_seconds,
flake_retries=flake_retries,
timeout_retries=timeout_retries,
verbose_success=verbose_success)
return jobspec
def create_jobspec(name,
cmdline,
environ={},
shell=False,
flake_retries=0,
timeout_retries=0,
timeout_seconds=30 * 60,
use_workspace=False,
cpu_cost=1.0,
verbose_success=False):
"""Creates jobspec."""
environ = environ.copy()
if use_workspace:
environ['WORKSPACE_NAME'] = 'workspace_%s' % name
environ['ARTIFACTS_OUT'] = os.path.join('..', 'artifacts', name)
cmdline = ['bash', 'tools/run_tests/artifacts/run_in_workspace.sh'
] + cmdline
else:
environ['ARTIFACTS_OUT'] = os.path.join('artifacts', name)
jobspec = jobset.JobSpec(cmdline=cmdline,
environ=environ,
shortname='build_artifact.%s' % (name),
timeout_seconds=timeout_seconds,
flake_retries=flake_retries,
timeout_retries=timeout_retries,
shell=shell,
cpu_cost=cpu_cost,
verbose_success=verbose_success)
return jobspec
_MACOS_COMPAT_FLAG = '-mmacosx-version-min=10.10'
_ARCH_FLAG_MAP = {'x86': '-m32', 'x64': '-m64'}
class PythonArtifact:
"""Builds Python artifacts."""
def __init__(self, platform, arch, py_version):
self.name = 'python_%s_%s_%s' % (platform, arch, py_version)
self.platform = platform
self.arch = arch
self.labels = ['artifact', 'python', platform, arch, py_version]
self.py_version = py_version
if 'manylinux' in platform:
self.labels.append('linux')
def pre_build_jobspecs(self):
return []
def build_jobspec(self):
environ = {}
if self.platform == 'linux_extra':
# Crosscompilation build for armv7 (e.g. Raspberry Pi)
environ['PYTHON'] = '/opt/python/{}/bin/python3'.format(
self.py_version)
environ['PIP'] = '/opt/python/{}/bin/pip3'.format(self.py_version)
environ['GRPC_SKIP_PIP_CYTHON_UPGRADE'] = 'TRUE'
environ['GRPC_SKIP_TWINE_CHECK'] = 'TRUE'
return create_docker_jobspec(
self.name,
'tools/dockerfile/grpc_artifact_python_linux_{}'.format(
self.arch),
'tools/run_tests/artifacts/build_artifact_python.sh',
environ=environ,
timeout_seconds=60 * 60)
elif 'manylinux' in self.platform:
if self.arch == 'x86':
environ['SETARCH_CMD'] = 'linux32'
# Inside the manylinux container, the python installations are located in
# special places...
environ['PYTHON'] = '/opt/python/{}/bin/python'.format(
self.py_version)
environ['PIP'] = '/opt/python/{}/bin/pip'.format(self.py_version)
environ['GRPC_SKIP_PIP_CYTHON_UPGRADE'] = 'TRUE'
if self.arch == 'aarch64':
environ['GRPC_SKIP_TWINE_CHECK'] = 'TRUE'
else:
# only run auditwheel if we're not crosscompiling
environ['GRPC_RUN_AUDITWHEEL_REPAIR'] = 'TRUE'
# only build the packages that depend on grpcio-tools
# if we're not crosscompiling.
# - they require protoc to run on current architecture
# - they only have sdist packages anyway, so it's useless to build them again
environ['GRPC_BUILD_GRPCIO_TOOLS_DEPENDENTS'] = 'TRUE'
return create_docker_jobspec(
self.name,
'tools/dockerfile/grpc_artifact_python_%s_%s' %
(self.platform, self.arch),
'tools/run_tests/artifacts/build_artifact_python.sh',
environ=environ,
timeout_seconds=60 * 60 * 2)
elif self.platform == 'windows':
if 'Python27' in self.py_version:
environ['EXT_COMPILER'] = 'mingw32'
else:
environ['EXT_COMPILER'] = 'msvc'
# For some reason, the batch script %random% always runs with the same
# seed. We create a random temp-dir here
dir = ''.join(
random.choice(string.ascii_uppercase) for _ in range(10))
return create_jobspec(self.name, [
'tools\\run_tests\\artifacts\\build_artifact_python.bat',
self.py_version, '32' if self.arch == 'x86' else '64'
],
environ=environ,
timeout_seconds=45 * 60,
use_workspace=True)
else:
environ['PYTHON'] = self.py_version
environ['SKIP_PIP_INSTALL'] = 'TRUE'
return create_jobspec(
self.name,
['tools/run_tests/artifacts/build_artifact_python.sh'],
environ=environ,
timeout_seconds=60 * 60 * 2,
use_workspace=True)
def __str__(self):
return self.name
class RubyArtifact:
"""Builds ruby native gem."""
def __init__(self, platform, arch):
self.name = 'ruby_native_gem_%s_%s' % (platform, arch)
self.platform = platform
self.arch = arch
self.labels = ['artifact', 'ruby', platform, arch]
def pre_build_jobspecs(self):
return []
def build_jobspec(self):
# Ruby build uses docker internally and docker cannot be nested.
# We are using a custom workspace instead.
return create_jobspec(
self.name, ['tools/run_tests/artifacts/build_artifact_ruby.sh'],
use_workspace=True,
timeout_seconds=60 * 60)
class CSharpExtArtifact:
"""Builds C# native extension library"""
def __init__(self, platform, arch, arch_abi=None):
self.name = 'csharp_ext_%s_%s' % (platform, arch)
self.platform = platform
self.arch = arch
self.arch_abi = arch_abi
self.labels = ['artifact', 'csharp', platform, arch]
if arch_abi:
self.name += '_%s' % arch_abi
self.labels.append(arch_abi)
def pre_build_jobspecs(self):
return []
def build_jobspec(self):
if self.arch == 'android':
return create_docker_jobspec(
self.name,
'tools/dockerfile/grpc_artifact_android_ndk',
'tools/run_tests/artifacts/build_artifact_csharp_android.sh',
environ={'ANDROID_ABI': self.arch_abi})
elif self.arch == 'ios':
return create_jobspec(
self.name,
['tools/run_tests/artifacts/build_artifact_csharp_ios.sh'],
timeout_seconds=60 * 60,
use_workspace=True)
elif self.platform == 'windows':
return create_jobspec(self.name, [
'tools\\run_tests\\artifacts\\build_artifact_csharp.bat',
self.arch
],
timeout_seconds=45 * 60,
use_workspace=True)
else:
if self.platform == 'linux':
dockerfile_dir = 'tools/dockerfile/grpc_artifact_centos6_{}'.format(
self.arch)
if self.arch == 'aarch64':
# for aarch64, use a dockcross manylinux image that will
# give us both ready to use crosscompiler and sufficient backward compatibility
dockerfile_dir = 'tools/dockerfile/grpc_artifact_python_manylinux_2_24_aarch64'
return create_docker_jobspec(
self.name, dockerfile_dir,
'tools/run_tests/artifacts/build_artifact_csharp.sh')
else:
return create_jobspec(
self.name,
['tools/run_tests/artifacts/build_artifact_csharp.sh'],
timeout_seconds=45 * 60,
use_workspace=True)
def __str__(self):
return self.name
class PHPArtifact:
"""Builds PHP PECL package"""
def __init__(self, platform, arch):
self.name = 'php_pecl_package_{0}_{1}'.format(platform, arch)
self.platform = platform
self.arch = arch
self.labels = ['artifact', 'php', platform, arch]
def pre_build_jobspecs(self):
return []
def build_jobspec(self):
return create_docker_jobspec(
self.name,
'tools/dockerfile/test/php73_zts_stretch_{}'.format(self.arch),
'tools/run_tests/artifacts/build_artifact_php.sh')
class ProtocArtifact:
"""Builds protoc and protoc-plugin artifacts"""
def __init__(self, platform, arch):
self.name = 'protoc_%s_%s' % (platform, arch)
self.platform = platform
self.arch = arch
self.labels = ['artifact', 'protoc', platform, arch]
def pre_build_jobspecs(self):
return []
def build_jobspec(self):
if self.platform != 'windows':
environ = {'CXXFLAGS': '', 'LDFLAGS': ''}
if self.platform == 'linux':
dockerfile_dir = 'tools/dockerfile/grpc_artifact_centos6_{}'.format(
self.arch)
if self.arch == 'aarch64':
# for aarch64, use a dockcross manylinux image that will
# give us both ready to use crosscompiler and sufficient backward compatibility
dockerfile_dir = 'tools/dockerfile/grpc_artifact_protoc_aarch64'
environ['LDFLAGS'] += ' -static-libgcc -static-libstdc++ -s'
return create_docker_jobspec(
self.name,
dockerfile_dir,
'tools/run_tests/artifacts/build_artifact_protoc.sh',
environ=environ)
else:
environ[
'CXXFLAGS'] += ' -std=c++11 -stdlib=libc++ %s' % _MACOS_COMPAT_FLAG
return create_jobspec(
self.name,
['tools/run_tests/artifacts/build_artifact_protoc.sh'],
environ=environ,
timeout_seconds=60 * 60,
use_workspace=True)
else:
generator = 'Visual Studio 14 2015 Win64' if self.arch == 'x64' else 'Visual Studio 14 2015'
return create_jobspec(
self.name,
['tools\\run_tests\\artifacts\\build_artifact_protoc.bat'],
environ={'generator': generator},
use_workspace=True)
def __str__(self):
return self.name
def targets():
"""Gets list of supported targets"""
return [
ProtocArtifact('linux', 'x64'),
ProtocArtifact('linux', 'x86'),
ProtocArtifact('linux', 'aarch64'),
ProtocArtifact('macos', 'x64'),
ProtocArtifact('windows', 'x64'),
ProtocArtifact('windows', 'x86'),
CSharpExtArtifact('linux', 'x64'),
CSharpExtArtifact('linux', 'aarch64'),
CSharpExtArtifact('macos', 'x64'),
CSharpExtArtifact('windows', 'x64'),
CSharpExtArtifact('windows', 'x86'),
CSharpExtArtifact('linux', 'android', arch_abi='arm64-v8a'),
CSharpExtArtifact('linux', 'android', arch_abi='armeabi-v7a'),
CSharpExtArtifact('linux', 'android', arch_abi='x86'),
CSharpExtArtifact('macos', 'ios'),
PythonArtifact('manylinux2014', 'x64', 'cp35-cp35m'),
PythonArtifact('manylinux2014', 'x64', 'cp36-cp36m'),
PythonArtifact('manylinux2014', 'x64', 'cp37-cp37m'),
PythonArtifact('manylinux2014', 'x64', 'cp38-cp38'),
PythonArtifact('manylinux2014', 'x64', 'cp39-cp39'),
PythonArtifact('manylinux2014', 'x86', 'cp35-cp35m'),
PythonArtifact('manylinux2014', 'x86', 'cp36-cp36m'),
PythonArtifact('manylinux2014', 'x86', 'cp37-cp37m'),
PythonArtifact('manylinux2014', 'x86', 'cp38-cp38'),
PythonArtifact('manylinux2014', 'x86', 'cp39-cp39'),
PythonArtifact('manylinux2010', 'x64', 'cp27-cp27m'),
PythonArtifact('manylinux2010', 'x64', 'cp27-cp27mu'),
PythonArtifact('manylinux2010', 'x64', 'cp35-cp35m'),
PythonArtifact('manylinux2010', 'x64', 'cp36-cp36m'),
PythonArtifact('manylinux2010', 'x64', 'cp37-cp37m'),
PythonArtifact('manylinux2010', 'x64', 'cp38-cp38'),
PythonArtifact('manylinux2010', 'x64', 'cp39-cp39'),
PythonArtifact('manylinux2010', 'x86', 'cp27-cp27m'),
PythonArtifact('manylinux2010', 'x86', 'cp27-cp27mu'),
PythonArtifact('manylinux2010', 'x86', 'cp35-cp35m'),
PythonArtifact('manylinux2010', 'x86', 'cp36-cp36m'),
PythonArtifact('manylinux2010', 'x86', 'cp37-cp37m'),
PythonArtifact('manylinux2010', 'x86', 'cp38-cp38'),
PythonArtifact('manylinux2010', 'x86', 'cp39-cp39'),
PythonArtifact('manylinux_2_24', 'aarch64', 'cp36-cp36m'),
PythonArtifact('manylinux_2_24', 'aarch64', 'cp37-cp37m'),
PythonArtifact('manylinux_2_24', 'aarch64', 'cp38-cp38'),
PythonArtifact('manylinux_2_24', 'aarch64', 'cp39-cp39'),
PythonArtifact('linux_extra', 'armv7', 'cp36-cp36m'),
PythonArtifact('linux_extra', 'armv7', 'cp37-cp37m'),
PythonArtifact('linux_extra', 'armv7', 'cp38-cp38'),
PythonArtifact('linux_extra', 'armv7', 'cp39-cp39'),
PythonArtifact('macos', 'x64', 'python2.7'),
PythonArtifact('macos', 'x64', 'python3.5'),
PythonArtifact('macos', 'x64', 'python3.6'),
PythonArtifact('macos', 'x64', 'python3.7'),
PythonArtifact('macos', 'x64', 'python3.8'),
PythonArtifact('macos', 'x64', 'python3.9'),
PythonArtifact('windows', 'x86', 'Python27_32bit'),
PythonArtifact('windows', 'x86', 'Python35_32bit'),
PythonArtifact('windows', 'x86', 'Python36_32bit'),
PythonArtifact('windows', 'x86', 'Python37_32bit'),
PythonArtifact('windows', 'x86', 'Python38_32bit'),
PythonArtifact('windows', 'x86', 'Python39_32bit'),
PythonArtifact('windows', 'x64', 'Python27'),
PythonArtifact('windows', 'x64', 'Python35'),
PythonArtifact('windows', 'x64', 'Python36'),
PythonArtifact('windows', 'x64', 'Python37'),
PythonArtifact('windows', 'x64', 'Python38'),
PythonArtifact('windows', 'x64', 'Python39'),
RubyArtifact('linux', 'x64'),
RubyArtifact('macos', 'x64'),
PHPArtifact('linux', 'x64')
]
| apache-2.0 | 5,100,993,569,728,586,000 | 40.70297 | 104 | 0.554962 | false |
Titosoft/ferry-boat | web2py/gluon/tests/test_cache.py | 18 | 2563 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Unit tests for gluon.cache
"""
import sys
import os
import unittest
def fix_sys_path():
"""
    Logic to ensure that sys.path is always correct:
'', web2py/gluon, web2py/site-packages, web2py/ ...
"""
def add_path_first(path):
sys.path = [path] + [p for p in sys.path if (
not p == path and not p == (path + '/'))]
path = os.path.dirname(os.path.abspath(__file__))
if not os.path.isfile(os.path.join(path,'web2py.py')):
i = 0
while i<10:
i += 1
if os.path.exists(os.path.join(path,'web2py.py')):
break
path = os.path.abspath(os.path.join(path, '..'))
paths = [path,
os.path.abspath(os.path.join(path, 'site-packages')),
os.path.abspath(os.path.join(path, 'gluon')),
'']
[add_path_first(path) for path in paths]
fix_sys_path()
from storage import Storage
from cache import CacheInRam, CacheOnDisk
oldcwd = None
def setUpModule():
global oldcwd
if oldcwd is None:
oldcwd = os.getcwd()
if not os.path.isdir('gluon'):
os.chdir(os.path.realpath('../../'))
def tearDownModule():
global oldcwd
if oldcwd:
os.chdir(oldcwd)
oldcwd = None
class TestCache(unittest.TestCase):
def testCacheInRam(self):
# defaults to mode='http'
cache = CacheInRam()
self.assertEqual(cache('a', lambda: 1, 0), 1)
self.assertEqual(cache('a', lambda: 2, 100), 1)
cache.clear('b')
self.assertEqual(cache('a', lambda: 2, 100), 1)
cache.clear('a')
self.assertEqual(cache('a', lambda: 2, 100), 2)
cache.clear()
self.assertEqual(cache('a', lambda: 3, 100), 3)
self.assertEqual(cache('a', lambda: 4, 0), 4)
def testCacheOnDisk(self):
# defaults to mode='http'
s = Storage({'application': 'admin',
'folder': 'applications/admin'})
cache = CacheOnDisk(s)
self.assertEqual(cache('a', lambda: 1, 0), 1)
self.assertEqual(cache('a', lambda: 2, 100), 1)
cache.clear('b')
self.assertEqual(cache('a', lambda: 2, 100), 1)
cache.clear('a')
self.assertEqual(cache('a', lambda: 2, 100), 2)
cache.clear()
self.assertEqual(cache('a', lambda: 3, 100), 3)
self.assertEqual(cache('a', lambda: 4, 0), 4)
if __name__ == '__main__':
setUpModule() # pre-python-2.7
unittest.main()
tearDownModule()
| mit | 633,755,781,617,428,500 | 24.376238 | 66 | 0.551697 | false |
labero/kupfer | kupfer/plugin/multihead.py | 4 | 1171 | # TRANS: Multihead refers to support for multiple computer displays
# TRANS: In this case, it only concerns the special configuration
# TRANS: with multiple X "screens"
__kupfer_name__ = _("Multihead Support")
__kupfer_sources__ = ()
__description__ = ("Will run the keyboard shortcut relay service on additional"
" X screens if needed.")
__version__ = ""
__author__ = ""
import os
import gtk
from kupfer import pretty
from kupfer import utils
child_pids = []
def initialize_plugin(name):
global pid
## check for multihead
display = gtk.gdk.display_get_default()
screen = display.get_default_screen()
if display.get_n_screens() > 1:
pretty.print_info(__name__, "Starting Multi X screen support")
for idx in xrange(display.get_n_screens()):
if idx != screen.get_number():
pretty.print_info(__name__, "Launching keyrelay for screen", idx)
screen_x = display.get_screen(idx)
# run helper without respawning it
pid = utils.start_plugin_helper("kupfer.keyrelay",
False, screen_x.make_display_name())
child_pids.append(pid)
def finalize_plugin(name):
for pid in child_pids:
os.kill(pid, 1)
child_pids[:] = []
| gpl-3.0 | 1,557,507,452,257,198,300 | 28.275 | 79 | 0.684031 | false |
TieWei/nova | nova/api/metadata/base.py | 7 | 17455 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Instance Metadata information."""
import base64
import json
import os
import posixpath
from oslo.config import cfg
from nova.api.ec2 import ec2utils
from nova.api.metadata import password
from nova import block_device
from nova.compute import flavors
from nova import conductor
from nova import context
from nova import network
from nova.openstack.common.gettextutils import _
from nova.openstack.common import importutils
from nova.openstack.common import log as logging
from nova.openstack.common import timeutils
from nova import utils
from nova.virt import netutils
metadata_opts = [
cfg.StrOpt('config_drive_skip_versions',
default=('1.0 2007-01-19 2007-03-01 2007-08-29 2007-10-10 '
'2007-12-15 2008-02-01 2008-09-01'),
help=('List of metadata versions to skip placing into the '
'config drive')),
cfg.StrOpt('vendordata_driver',
default='nova.api.metadata.vendordata_json.JsonFileVendorData',
help='Driver to use for vendor data'),
]
CONF = cfg.CONF
CONF.register_opts(metadata_opts)
CONF.import_opt('dhcp_domain', 'nova.network.manager')
VERSIONS = [
'1.0',
'2007-01-19',
'2007-03-01',
'2007-08-29',
'2007-10-10',
'2007-12-15',
'2008-02-01',
'2008-09-01',
'2009-04-04',
]
FOLSOM = '2012-08-10'
GRIZZLY = '2013-04-04'
HAVANA = '2013-10-17'
OPENSTACK_VERSIONS = [
FOLSOM,
GRIZZLY,
HAVANA,
]
CONTENT_DIR = "content"
MD_JSON_NAME = "meta_data.json"
VD_JSON_NAME = "vendor_data.json"
UD_NAME = "user_data"
PASS_NAME = "password"
LOG = logging.getLogger(__name__)
class InvalidMetadataVersion(Exception):
pass
class InvalidMetadataPath(Exception):
pass
class InstanceMetadata():
"""Instance metadata."""
def __init__(self, instance, address=None, content=None, extra_md=None,
conductor_api=None, network_info=None, vd_driver=None):
"""Creation of this object should basically cover all time consuming
collection. Methods after that should not cause time delays due to
network operations or lengthy cpu operations.
The user should then get a single instance and make multiple method
calls on it.
"""
if not content:
content = []
self.instance = instance
self.extra_md = extra_md
if conductor_api:
capi = conductor_api
else:
capi = conductor.API()
ctxt = context.get_admin_context()
self.availability_zone = ec2utils.get_availability_zone_by_host(
instance['host'], capi)
self.ip_info = ec2utils.get_ip_info_for_instance(ctxt, instance)
self.security_groups = capi.security_group_get_by_instance(ctxt,
instance)
self.mappings = _format_instance_mapping(capi, ctxt, instance)
if instance.get('user_data', None) is not None:
self.userdata_raw = base64.b64decode(instance['user_data'])
else:
self.userdata_raw = None
self.ec2_ids = capi.get_ec2_ids(ctxt, instance)
self.address = address
# expose instance metadata.
self.launch_metadata = utils.instance_meta(instance)
self.password = password.extract_password(instance)
self.uuid = instance.get('uuid')
self.content = {}
self.files = []
# get network info, and the rendered network template
if network_info is None:
network_info = network.API().get_instance_nw_info(ctxt,
instance)
self.network_config = None
cfg = netutils.get_injected_network_template(network_info)
if cfg:
key = "%04i" % len(self.content)
self.content[key] = cfg
self.network_config = {"name": "network_config",
'content_path': "/%s/%s" % (CONTENT_DIR, key)}
# 'content' is passed in from the configdrive code in
        # nova/virt/libvirt/driver.py. That's how we get the injected files
# (personalities) in. AFAIK they're not stored in the db at all,
# so are not available later (web service metadata time).
for (path, contents) in content:
key = "%04i" % len(self.content)
self.files.append({'path': path,
'content_path': "/%s/%s" % (CONTENT_DIR, key)})
self.content[key] = contents
if vd_driver is None:
vdclass = importutils.import_class(CONF.vendordata_driver)
else:
vdclass = vd_driver
self.vddriver = vdclass(instance=instance, address=address,
extra_md=extra_md, network_info=network_info)
def get_ec2_metadata(self, version):
if version == "latest":
version = VERSIONS[-1]
if version not in VERSIONS:
raise InvalidMetadataVersion(version)
hostname = self._get_hostname()
floating_ips = self.ip_info['floating_ips']
floating_ip = floating_ips and floating_ips[0] or ''
fmt_sgroups = [x['name'] for x in self.security_groups]
meta_data = {
'ami-id': self.ec2_ids['ami-id'],
'ami-launch-index': self.instance['launch_index'],
'ami-manifest-path': 'FIXME',
'instance-id': self.ec2_ids['instance-id'],
'hostname': hostname,
'local-ipv4': self.address,
'reservation-id': self.instance['reservation_id'],
'security-groups': fmt_sgroups}
# public keys are strangely rendered in ec2 metadata service
# meta-data/public-keys/ returns '0=keyname' (with no trailing /)
# and only if there is a public key given.
# '0=keyname' means there is a normally rendered dict at
# meta-data/public-keys/0
#
# meta-data/public-keys/ : '0=%s' % keyname
# meta-data/public-keys/0/ : 'openssh-key'
# meta-data/public-keys/0/openssh-key : '%s' % publickey
if self.instance['key_name']:
meta_data['public-keys'] = {
'0': {'_name': "0=" + self.instance['key_name'],
'openssh-key': self.instance['key_data']}}
if self._check_version('2007-01-19', version):
meta_data['local-hostname'] = hostname
meta_data['public-hostname'] = hostname
meta_data['public-ipv4'] = floating_ip
if False and self._check_version('2007-03-01', version):
# TODO(vish): store product codes
meta_data['product-codes'] = []
if self._check_version('2007-08-29', version):
instance_type = flavors.extract_flavor(self.instance)
meta_data['instance-type'] = instance_type['name']
if False and self._check_version('2007-10-10', version):
# TODO(vish): store ancestor ids
meta_data['ancestor-ami-ids'] = []
if self._check_version('2007-12-15', version):
meta_data['block-device-mapping'] = self.mappings
if 'kernel-id' in self.ec2_ids:
meta_data['kernel-id'] = self.ec2_ids['kernel-id']
if 'ramdisk-id' in self.ec2_ids:
meta_data['ramdisk-id'] = self.ec2_ids['ramdisk-id']
if self._check_version('2008-02-01', version):
meta_data['placement'] = {'availability-zone':
self.availability_zone}
if self._check_version('2008-09-01', version):
meta_data['instance-action'] = 'none'
data = {'meta-data': meta_data}
if self.userdata_raw is not None:
data['user-data'] = self.userdata_raw
return data
def get_ec2_item(self, path_tokens):
# get_ec2_metadata returns dict without top level version
data = self.get_ec2_metadata(path_tokens[0])
return find_path_in_tree(data, path_tokens[1:])
def get_openstack_item(self, path_tokens):
if path_tokens[0] == CONTENT_DIR:
if len(path_tokens) == 1:
raise KeyError("no listing for %s" % "/".join(path_tokens))
if len(path_tokens) != 2:
raise KeyError("Too many tokens for /%s" % CONTENT_DIR)
return self.content[path_tokens[1]]
version = path_tokens[0]
if version == "latest":
version = OPENSTACK_VERSIONS[-1]
if version not in OPENSTACK_VERSIONS:
raise InvalidMetadataVersion(version)
path = '/'.join(path_tokens[1:])
if len(path_tokens) == 1:
# request for /version, give a list of what is available
ret = [MD_JSON_NAME]
if self.userdata_raw is not None:
ret.append(UD_NAME)
if self._check_os_version(GRIZZLY, version):
ret.append(PASS_NAME)
if self._check_os_version(HAVANA, version):
ret.append(VD_JSON_NAME)
return ret
if path == UD_NAME:
if self.userdata_raw is None:
raise KeyError(path)
return self.userdata_raw
if path == PASS_NAME and self._check_os_version(GRIZZLY, version):
return password.handle_password
if path == VD_JSON_NAME and self._check_os_version(HAVANA, version):
return json.dumps(self.vddriver.get())
if path != MD_JSON_NAME:
raise KeyError(path)
metadata = {}
metadata['uuid'] = self.uuid
if self.launch_metadata:
metadata['meta'] = self.launch_metadata
if self.files:
metadata['files'] = self.files
if self.extra_md:
metadata.update(self.extra_md)
if self.launch_metadata:
metadata['meta'] = self.launch_metadata
if self.network_config:
metadata['network_config'] = self.network_config
if self.instance['key_name']:
metadata['public_keys'] = {
self.instance['key_name']: self.instance['key_data']
}
metadata['hostname'] = self._get_hostname()
metadata['name'] = self.instance['display_name']
metadata['launch_index'] = self.instance['launch_index']
metadata['availability_zone'] = self.availability_zone
if self._check_os_version(GRIZZLY, version):
metadata['random_seed'] = base64.b64encode(os.urandom(512))
data = {
MD_JSON_NAME: json.dumps(metadata),
}
return data[path]
def _check_version(self, required, requested, versions=VERSIONS):
return versions.index(requested) >= versions.index(required)
def _check_os_version(self, required, requested):
return self._check_version(required, requested, OPENSTACK_VERSIONS)
def _get_hostname(self):
return "%s%s%s" % (self.instance['hostname'],
'.' if CONF.dhcp_domain else '',
CONF.dhcp_domain)
def lookup(self, path):
if path == "" or path[0] != "/":
path = posixpath.normpath("/" + path)
else:
path = posixpath.normpath(path)
# fix up requests, prepending /ec2 to anything that does not match
path_tokens = path.split('/')[1:]
if path_tokens[0] not in ("ec2", "openstack"):
if path_tokens[0] == "":
# request for /
path_tokens = ["ec2"]
else:
path_tokens = ["ec2"] + path_tokens
path = "/" + "/".join(path_tokens)
# all values of 'path' input starts with '/' and have no trailing /
# specifically handle the top level request
if len(path_tokens) == 1:
if path_tokens[0] == "openstack":
# NOTE(vish): don't show versions that are in the future
today = timeutils.utcnow().strftime("%Y-%m-%d")
versions = [v for v in OPENSTACK_VERSIONS if v <= today]
if OPENSTACK_VERSIONS != versions:
LOG.debug(_("future versions %s hidden in version list"),
[v for v in OPENSTACK_VERSIONS
if v not in versions])
versions += ["latest"]
else:
versions = VERSIONS + ["latest"]
return versions
try:
if path_tokens[0] == "openstack":
data = self.get_openstack_item(path_tokens[1:])
else:
data = self.get_ec2_item(path_tokens[1:])
except (InvalidMetadataVersion, KeyError):
raise InvalidMetadataPath(path)
return data
def metadata_for_config_drive(self):
"""Yields (path, value) tuples for metadata elements."""
# EC2 style metadata
for version in VERSIONS + ["latest"]:
if version in CONF.config_drive_skip_versions.split(' '):
continue
data = self.get_ec2_metadata(version)
if 'user-data' in data:
filepath = os.path.join('ec2', version, 'user-data')
yield (filepath, data['user-data'])
del data['user-data']
try:
del data['public-keys']['0']['_name']
except KeyError:
pass
filepath = os.path.join('ec2', version, 'meta-data.json')
yield (filepath, json.dumps(data['meta-data']))
ALL_OPENSTACK_VERSIONS = OPENSTACK_VERSIONS + ["latest"]
for version in ALL_OPENSTACK_VERSIONS:
path = 'openstack/%s/%s' % (version, MD_JSON_NAME)
yield (path, self.lookup(path))
path = 'openstack/%s/%s' % (version, UD_NAME)
if self.userdata_raw is not None:
yield (path, self.lookup(path))
if self._check_version(HAVANA, version, ALL_OPENSTACK_VERSIONS):
path = 'openstack/%s/%s' % (version, VD_JSON_NAME)
yield (path, self.lookup(path))
for (cid, content) in self.content.iteritems():
yield ('%s/%s/%s' % ("openstack", CONTENT_DIR, cid), content)
class VendorDataDriver(object):
"""The base VendorData Drivers should inherit from."""
def __init__(self, *args, **kwargs):
"""Init method should do all expensive operations."""
self._data = {}
def get(self):
"""Return a dictionary of primitives to be rendered in metadata
:return: A dictionary or primitives.
"""
return self._data
def get_metadata_by_address(conductor_api, address):
ctxt = context.get_admin_context()
fixed_ip = network.API().get_fixed_ip_by_address(ctxt, address)
return get_metadata_by_instance_id(conductor_api,
fixed_ip['instance_uuid'],
address,
ctxt)
def get_metadata_by_instance_id(conductor_api, instance_id, address,
ctxt=None):
ctxt = ctxt or context.get_admin_context()
instance = conductor_api.instance_get_by_uuid(ctxt, instance_id)
return InstanceMetadata(instance, address)
def _format_instance_mapping(conductor_api, ctxt, instance):
bdms = conductor_api.block_device_mapping_get_all_by_instance(
ctxt, instance)
return block_device.instance_block_mapping(instance, bdms)
def ec2_md_print(data):
if isinstance(data, dict):
output = ''
for key in sorted(data.keys()):
if key == '_name':
continue
if isinstance(data[key], dict):
if '_name' in data[key]:
output += str(data[key]['_name'])
else:
output += key + '/'
else:
output += key
output += '\n'
return output[:-1]
elif isinstance(data, list):
return '\n'.join(data)
else:
return str(data)
def find_path_in_tree(data, path_tokens):
# given a dict/list tree, and a path in that tree, return data found there.
for i in range(0, len(path_tokens)):
if isinstance(data, dict) or isinstance(data, list):
if path_tokens[i] in data:
data = data[path_tokens[i]]
else:
raise KeyError("/".join(path_tokens[0:i]))
else:
if i != len(path_tokens) - 1:
raise KeyError("/".join(path_tokens[0:i]))
data = data[path_tokens[i]]
return data
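# Illustrative example (hypothetical tree, not executed): with
# data = {'meta-data': {'hostname': 'vm-1'}},
# find_path_in_tree(data, ['meta-data', 'hostname']) returns 'vm-1', while
# ['meta-data', 'missing'] raises KeyError('meta-data'), i.e. the joined path
# of the tokens successfully walked so far.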
| apache-2.0 | -8,308,570,618,604,748,000 | 33.428008 | 79 | 0.570667 | false |
techtonik/readthedocs.org | readthedocs/restapi/views/search_views.py | 4 | 7293 | import logging
from rest_framework import decorators, permissions, status
from rest_framework.renderers import JSONPRenderer, JSONRenderer, BrowsableAPIRenderer
from rest_framework.response import Response
from readthedocs.builds.constants import LATEST
from readthedocs.builds.models import Version
from readthedocs.search.indexes import PageIndex, ProjectIndex, SectionIndex
from readthedocs.restapi import utils
log = logging.getLogger(__name__)
@decorators.api_view(['POST'])
@decorators.permission_classes((permissions.IsAdminUser,))
@decorators.renderer_classes((JSONRenderer, JSONPRenderer, BrowsableAPIRenderer))
def index_search(request):
"""
Add things to the search index.
"""
data = request.DATA['data']
version_pk = data['version_pk']
commit = data.get('commit')
version = Version.objects.get(pk=version_pk)
project_scale = 1
page_scale = 1
utils.index_search_request(
version=version, page_list=data['page_list'], commit=commit,
project_scale=project_scale, page_scale=page_scale)
return Response({'indexed': True})
@decorators.api_view(['GET'])
@decorators.permission_classes((permissions.AllowAny,))
@decorators.renderer_classes((JSONRenderer, JSONPRenderer, BrowsableAPIRenderer))
def search(request):
project_slug = request.GET.get('project', None)
version_slug = request.GET.get('version', LATEST)
query = request.GET.get('q', None)
if project_slug is None or query is None:
return Response({'error': 'Need project and q'}, status=status.HTTP_400_BAD_REQUEST)
log.debug("(API Search) %s" % query)
kwargs = {}
body = {
"query": {
"function_score": {
"field_value_factor": {"field": "weight"},
"query": {
"bool": {
"should": [
{"match": {"title": {"query": query, "boost": 10}}},
{"match": {"headers": {"query": query, "boost": 5}}},
{"match": {"content": {"query": query}}},
]
}
}
}
},
"highlight": {
"fields": {
"title": {},
"headers": {},
"content": {},
}
},
"fields": ["title", "project", "version", "path"],
"size": 50 # TODO: Support pagination.
}
if project_slug:
body['filter'] = {
"and": [
{"term": {"project": project_slug}},
{"term": {"version": version_slug}},
]
}
# Add routing to optimize search by hitting the right shard.
kwargs['routing'] = project_slug
results = PageIndex().search(body, **kwargs)
return Response({'results': results})
@decorators.api_view(['GET'])
@decorators.permission_classes((permissions.AllowAny,))
@decorators.renderer_classes((JSONRenderer, JSONPRenderer, BrowsableAPIRenderer))
def project_search(request):
query = request.GET.get('q', None)
if query is None:
return Response({'error': 'Need project and q'}, status=status.HTTP_400_BAD_REQUEST)
log.debug("(API Project Search) %s" % (query))
body = {
"query": {
"function_score": {
"field_value_factor": {"field": "weight"},
"query": {
"bool": {
"should": [
{"match": {"name": {"query": query, "boost": 10}}},
{"match": {"description": {"query": query}}},
]
}
}
}
},
"fields": ["name", "slug", "description", "lang"]
}
results = ProjectIndex().search(body)
return Response({'results': results})
@decorators.api_view(['GET'])
@decorators.permission_classes((permissions.AllowAny,))
@decorators.renderer_classes((JSONRenderer, JSONPRenderer, BrowsableAPIRenderer))
def section_search(request):
"""
Search for a Section of content on Read the Docs.
A Section is a subheading on a specific page.
Query Thoughts
--------------
If you want to search across all documents, just query with a ``q`` GET arg.
If you want to filter by a specific project, include a ``project`` GET arg.
Facets
------
When you search, you will have a ``project`` facet, which includes the
number of matching sections per project. When you search inside a project,
the ``path`` facet will show the number of matching sections per page.
Possible GET args
-----------------
* q - The query string **Required**
* project - A project slug *Optional*
* version - A version slug *Optional*
* path - A file path slug *Optional*
Example
-------
GET /api/v2/search/section/?q=virtualenv&project=django
Current Query
-------------
"""
query = request.GET.get('q', None)
if not query:
return Response(
{'error': 'Search term required. Use the "q" GET arg to search. '},
status=status.HTTP_400_BAD_REQUEST)
project_slug = request.GET.get('project', None)
version_slug = request.GET.get('version', LATEST)
path_slug = request.GET.get('path', None)
log.debug("(API Section Search) [%s:%s] %s" % (project_slug, version_slug, query))
kwargs = {}
body = {
"query": {
"function_score": {
"field_value_factor": {"field": "weight"},
"query": {
"bool": {
"should": [
{"match": {"title": {"query": query, "boost": 10}}},
{"match": {"content": {"query": query}}},
]
}
}
}
},
"facets": {
"project": {
"terms": {"field": "project"},
"facet_filter": {
"term": {"version": version_slug},
}
},
},
"highlight": {
"fields": {
"title": {},
"content": {},
}
},
"fields": ["title", "project", "version", "path", "page_id", "content"],
"size": 10 # TODO: Support pagination.
}
if project_slug:
body['filter'] = {
"and": [
{"term": {"project": project_slug}},
{"term": {"version": version_slug}},
]
}
body['facets']['path'] = {
"terms": {"field": "path"},
"facet_filter": {
"term": {"project": project_slug},
}
        }
# Add routing to optimize search by hitting the right shard.
kwargs['routing'] = project_slug
if path_slug:
body['filter'] = {
"and": [
{"term": {"path": path_slug}},
]
}
if path_slug and not project_slug:
# Show facets when we only have a path
body['facets']['path'] = {
"terms": {"field": "path"}
}
results = SectionIndex().search(body, **kwargs)
return Response({'results': results})
| mit | -5,960,257,018,852,270,000 | 30.034043 | 92 | 0.517483 | false |
atupal/ffbird | jinja2/nodes.py | 7 | 28857 | # -*- coding: utf-8 -*-
"""
jinja2.nodes
~~~~~~~~~~~~
This module implements additional nodes derived from the ast base node.
It also provides some node tree helper functions like `in_lineno` and
`get_nodes` used by the parser and translator in order to normalize
python and jinja nodes.
:copyright: (c) 2010 by the Jinja Team.
:license: BSD, see LICENSE for more details.
"""
import types
import operator
from collections import deque
from jinja2.utils import Markup
from jinja2._compat import izip, with_metaclass, text_type
#: the types we support for context functions
_context_function_types = (types.FunctionType, types.MethodType)
_binop_to_func = {
'*': operator.mul,
'/': operator.truediv,
'//': operator.floordiv,
'**': operator.pow,
'%': operator.mod,
'+': operator.add,
'-': operator.sub
}
_uaop_to_func = {
'not': operator.not_,
'+': operator.pos,
'-': operator.neg
}
_cmpop_to_func = {
'eq': operator.eq,
'ne': operator.ne,
'gt': operator.gt,
'gteq': operator.ge,
'lt': operator.lt,
'lteq': operator.le,
'in': lambda a, b: a in b,
'notin': lambda a, b: a not in b
}
class Impossible(Exception):
"""Raised if the node could not perform a requested action."""
class NodeType(type):
"""A metaclass for nodes that handles the field and attribute
inheritance. fields and attributes from the parent class are
automatically forwarded to the child."""
def __new__(cls, name, bases, d):
for attr in 'fields', 'attributes':
storage = []
storage.extend(getattr(bases[0], attr, ()))
storage.extend(d.get(attr, ()))
assert len(bases) == 1, 'multiple inheritance not allowed'
assert len(storage) == len(set(storage)), 'layout conflict'
d[attr] = tuple(storage)
d.setdefault('abstract', False)
return type.__new__(cls, name, bases, d)
class EvalContext(object):
"""Holds evaluation time information. Custom attributes can be attached
to it in extensions.
"""
def __init__(self, environment, template_name=None):
self.environment = environment
if callable(environment.autoescape):
self.autoescape = environment.autoescape(template_name)
else:
self.autoescape = environment.autoescape
self.volatile = False
def save(self):
return self.__dict__.copy()
def revert(self, old):
self.__dict__.clear()
self.__dict__.update(old)
def get_eval_context(node, ctx):
if ctx is None:
if node.environment is None:
raise RuntimeError('if no eval context is passed, the '
'node must have an attached '
'environment.')
return EvalContext(node.environment)
return ctx
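# Illustrative sketch (not part of the original module): building an
# EvalContext directly from an Environment instead of letting a node create
# one lazily. Assumes jinja2 is importable in the usual way.
def _example_eval_context():
    from jinja2 import Environment
    ctx = EvalContext(Environment(autoescape=True))
    saved = ctx.save()          # plain dict snapshot of the context state
    ctx.volatile = True
    ctx.revert(saved)           # restore the snapshot
    return ctx.autoescape       # True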
class Node(with_metaclass(NodeType, object)):
"""Baseclass for all Jinja2 nodes. There are a number of nodes available
of different types. There are four major types:
- :class:`Stmt`: statements
- :class:`Expr`: expressions
- :class:`Helper`: helper nodes
- :class:`Template`: the outermost wrapper node
All nodes have fields and attributes. Fields may be other nodes, lists,
or arbitrary values. Fields are passed to the constructor as regular
positional arguments, attributes as keyword arguments. Each node has
two attributes: `lineno` (the line number of the node) and `environment`.
The `environment` attribute is set at the end of the parsing process for
all nodes automatically.
"""
fields = ()
attributes = ('lineno', 'environment')
abstract = True
def __init__(self, *fields, **attributes):
if self.abstract:
            raise TypeError('abstract nodes are not instantiable')
if fields:
if len(fields) != len(self.fields):
if not self.fields:
raise TypeError('%r takes 0 arguments' %
self.__class__.__name__)
raise TypeError('%r takes 0 or %d argument%s' % (
self.__class__.__name__,
len(self.fields),
len(self.fields) != 1 and 's' or ''
))
for name, arg in izip(self.fields, fields):
setattr(self, name, arg)
for attr in self.attributes:
setattr(self, attr, attributes.pop(attr, None))
if attributes:
raise TypeError('unknown attribute %r' %
next(iter(attributes)))
def iter_fields(self, exclude=None, only=None):
"""This method iterates over all fields that are defined and yields
``(key, value)`` tuples. Per default all fields are returned, but
it's possible to limit that to some fields by providing the `only`
parameter or to exclude some using the `exclude` parameter. Both
should be sets or tuples of field names.
"""
for name in self.fields:
if (exclude is only is None) or \
(exclude is not None and name not in exclude) or \
(only is not None and name in only):
try:
yield name, getattr(self, name)
except AttributeError:
pass
def iter_child_nodes(self, exclude=None, only=None):
"""Iterates over all direct child nodes of the node. This iterates
        over all fields and yields the values if they are nodes. If the value
of a field is a list all the nodes in that list are returned.
"""
for field, item in self.iter_fields(exclude, only):
if isinstance(item, list):
for n in item:
if isinstance(n, Node):
yield n
elif isinstance(item, Node):
yield item
def find(self, node_type):
"""Find the first node of a given type. If no such node exists the
return value is `None`.
"""
for result in self.find_all(node_type):
return result
def find_all(self, node_type):
"""Find all the nodes of a given type. If the type is a tuple,
the check is performed for any of the tuple items.
"""
for child in self.iter_child_nodes():
if isinstance(child, node_type):
yield child
for result in child.find_all(node_type):
yield result
def set_ctx(self, ctx):
"""Reset the context of a node and all child nodes. Per default the
        parser will generate nodes that all have a 'load' context, as it's the
most common one. This method is used in the parser to set assignment
targets and other nodes to a store context.
"""
todo = deque([self])
while todo:
node = todo.popleft()
if 'ctx' in node.fields:
node.ctx = ctx
todo.extend(node.iter_child_nodes())
return self
def set_lineno(self, lineno, override=False):
"""Set the line numbers of the node and children."""
todo = deque([self])
while todo:
node = todo.popleft()
if 'lineno' in node.attributes:
if node.lineno is None or override:
node.lineno = lineno
todo.extend(node.iter_child_nodes())
return self
def set_environment(self, environment):
"""Set the environment for all nodes."""
todo = deque([self])
while todo:
node = todo.popleft()
node.environment = environment
todo.extend(node.iter_child_nodes())
return self
def __eq__(self, other):
return type(self) is type(other) and \
tuple(self.iter_fields()) == tuple(other.iter_fields())
def __ne__(self, other):
return not self.__eq__(other)
# Restore Python 2 hashing behavior on Python 3
__hash__ = object.__hash__
def __repr__(self):
return '%s(%s)' % (
self.__class__.__name__,
', '.join('%s=%r' % (arg, getattr(self, arg, None)) for
arg in self.fields)
)
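# Illustrative sketch (not part of the original module): the node classes
# defined below can be combined into small trees and inspected with the
# helpers above.  Wrapped in a function so nothing runs at import time.
def _example_node_tree():
    tree = And(Name('a', 'load'), Const(42), lineno=1)
    children = list(tree.iter_child_nodes())        # [Name(...), Const(...)]
    consts = list(tree.find_all(Const))             # [Const(value=42)]
    tree.set_ctx('store')                           # flips the Name ctx to 'store'
    return children, consts, tree.left.ctx          # (..., ..., 'store')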
class Stmt(Node):
"""Base node for all statements."""
abstract = True
class Helper(Node):
"""Nodes that exist in a specific context only."""
abstract = True
class Template(Node):
"""Node that represents a template. This must be the outermost node that
is passed to the compiler.
"""
fields = ('body',)
class Output(Stmt):
"""A node that holds multiple expressions which are then printed out.
This is used both for the `print` statement and the regular template data.
"""
fields = ('nodes',)
class Extends(Stmt):
"""Represents an extends statement."""
fields = ('template',)
class For(Stmt):
"""The for loop. `target` is the target for the iteration (usually a
:class:`Name` or :class:`Tuple`), `iter` the iterable. `body` is a list
of nodes that are used as loop-body, and `else_` a list of nodes for the
`else` block. If no else node exists it has to be an empty list.
For filtered nodes an expression can be stored as `test`, otherwise `None`.
"""
fields = ('target', 'iter', 'body', 'else_', 'test', 'recursive')
class If(Stmt):
"""If `test` is true, `body` is rendered, else `else_`."""
fields = ('test', 'body', 'else_')
class Macro(Stmt):
"""A macro definition. `name` is the name of the macro, `args` a list of
arguments and `defaults` a list of defaults if there are any. `body` is
a list of nodes for the macro body.
"""
fields = ('name', 'args', 'defaults', 'body')
class CallBlock(Stmt):
"""Like a macro without a name but a call instead. `call` is called with
the unnamed macro as `caller` argument this node holds.
"""
fields = ('call', 'args', 'defaults', 'body')
class FilterBlock(Stmt):
"""Node for filter sections."""
fields = ('body', 'filter')
class Block(Stmt):
"""A node that represents a block."""
fields = ('name', 'body', 'scoped')
class Include(Stmt):
"""A node that represents the include tag."""
fields = ('template', 'with_context', 'ignore_missing')
class Import(Stmt):
"""A node that represents the import tag."""
fields = ('template', 'target', 'with_context')
class FromImport(Stmt):
"""A node that represents the from import tag. It's important to not
pass unsafe names to the name attribute. The compiler translates the
attribute lookups directly into getattr calls and does *not* use the
subscript callback of the interface. As exported variables may not
start with double underscores (which the parser asserts) this is not a
problem for regular Jinja code, but if this node is used in an extension
extra care must be taken.
The list of names may contain tuples if aliases are wanted.
"""
fields = ('template', 'names', 'with_context')
class ExprStmt(Stmt):
"""A statement that evaluates an expression and discards the result."""
fields = ('node',)
class Assign(Stmt):
"""Assigns an expression to a target."""
fields = ('target', 'node')
class Expr(Node):
"""Baseclass for all expressions."""
abstract = True
def as_const(self, eval_ctx=None):
"""Return the value of the expression as constant or raise
:exc:`Impossible` if this was not possible.
An :class:`EvalContext` can be provided, if none is given
a default context is created which requires the nodes to have
an attached environment.
.. versionchanged:: 2.4
the `eval_ctx` parameter was added.
"""
raise Impossible()
def can_assign(self):
"""Check if it's possible to assign something to this node."""
return False
class BinExpr(Expr):
"""Baseclass for all binary expressions."""
fields = ('left', 'right')
operator = None
abstract = True
def as_const(self, eval_ctx=None):
eval_ctx = get_eval_context(self, eval_ctx)
# intercepted operators cannot be folded at compile time
if self.environment.sandboxed and \
self.operator in self.environment.intercepted_binops:
raise Impossible()
f = _binop_to_func[self.operator]
try:
return f(self.left.as_const(eval_ctx), self.right.as_const(eval_ctx))
except Exception:
raise Impossible()
class UnaryExpr(Expr):
"""Baseclass for all unary expressions."""
fields = ('node',)
operator = None
abstract = True
def as_const(self, eval_ctx=None):
eval_ctx = get_eval_context(self, eval_ctx)
# intercepted operators cannot be folded at compile time
if self.environment.sandboxed and \
self.operator in self.environment.intercepted_unops:
raise Impossible()
f = _uaop_to_func[self.operator]
try:
return f(self.node.as_const(eval_ctx))
except Exception:
raise Impossible()
class Name(Expr):
"""Looks up a name or stores a value in a name.
The `ctx` of the node can be one of the following values:
- `store`: store a value in the name
- `load`: load that name
- `param`: like `store` but if the name was defined as function parameter.
"""
fields = ('name', 'ctx')
def can_assign(self):
return self.name not in ('true', 'false', 'none',
'True', 'False', 'None')
class Literal(Expr):
"""Baseclass for literals."""
abstract = True
class Const(Literal):
"""All constant values. The parser will return this node for simple
constants such as ``42`` or ``"foo"`` but it can be used to store more
complex values such as lists too. Only constants with a safe
representation (objects where ``eval(repr(x)) == x`` is true).
"""
fields = ('value',)
def as_const(self, eval_ctx=None):
return self.value
@classmethod
def from_untrusted(cls, value, lineno=None, environment=None):
"""Return a const object if the value is representable as
constant value in the generated code, otherwise it will raise
an `Impossible` exception.
"""
from .compiler import has_safe_repr
if not has_safe_repr(value):
raise Impossible()
return cls(value, lineno=lineno, environment=environment)
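# Illustrative sketch (not part of the original module): `from_untrusted`
# only accepts values whose repr() round-trips; everything else raises
# Impossible.  Wrapped in a function so it does not run at import time.
def _example_const_folding():
    node = Const.from_untrusted([1, 2, 3])
    assert node.as_const() == [1, 2, 3]
    try:
        Const.from_untrusted(object())       # no safe repr
    except Impossible:
        return node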
class TemplateData(Literal):
"""A constant template string."""
fields = ('data',)
def as_const(self, eval_ctx=None):
eval_ctx = get_eval_context(self, eval_ctx)
if eval_ctx.volatile:
raise Impossible()
if eval_ctx.autoescape:
return Markup(self.data)
return self.data
class Tuple(Literal):
"""For loop unpacking and some other things like multiple arguments
for subscripts. Like for :class:`Name` `ctx` specifies if the tuple
is used for loading the names or storing.
"""
fields = ('items', 'ctx')
def as_const(self, eval_ctx=None):
eval_ctx = get_eval_context(self, eval_ctx)
return tuple(x.as_const(eval_ctx) for x in self.items)
def can_assign(self):
for item in self.items:
if not item.can_assign():
return False
return True
class List(Literal):
"""Any list literal such as ``[1, 2, 3]``"""
fields = ('items',)
def as_const(self, eval_ctx=None):
eval_ctx = get_eval_context(self, eval_ctx)
return [x.as_const(eval_ctx) for x in self.items]
class Dict(Literal):
"""Any dict literal such as ``{1: 2, 3: 4}``. The items must be a list of
:class:`Pair` nodes.
"""
fields = ('items',)
def as_const(self, eval_ctx=None):
eval_ctx = get_eval_context(self, eval_ctx)
return dict(x.as_const(eval_ctx) for x in self.items)
class Pair(Helper):
"""A key, value pair for dicts."""
fields = ('key', 'value')
def as_const(self, eval_ctx=None):
eval_ctx = get_eval_context(self, eval_ctx)
return self.key.as_const(eval_ctx), self.value.as_const(eval_ctx)
class Keyword(Helper):
"""A key, value pair for keyword arguments where key is a string."""
fields = ('key', 'value')
def as_const(self, eval_ctx=None):
eval_ctx = get_eval_context(self, eval_ctx)
return self.key, self.value.as_const(eval_ctx)
class CondExpr(Expr):
"""A conditional expression (inline if expression). (``{{
foo if bar else baz }}``)
"""
fields = ('test', 'expr1', 'expr2')
def as_const(self, eval_ctx=None):
eval_ctx = get_eval_context(self, eval_ctx)
if self.test.as_const(eval_ctx):
return self.expr1.as_const(eval_ctx)
# if we evaluate to an undefined object, we better do that at runtime
if self.expr2 is None:
raise Impossible()
return self.expr2.as_const(eval_ctx)
class Filter(Expr):
"""This node applies a filter on an expression. `name` is the name of
the filter, the rest of the fields are the same as for :class:`Call`.
If the `node` of a filter is `None` the contents of the last buffer are
filtered. Buffers are created by macros and filter blocks.
"""
fields = ('node', 'name', 'args', 'kwargs', 'dyn_args', 'dyn_kwargs')
def as_const(self, eval_ctx=None):
eval_ctx = get_eval_context(self, eval_ctx)
if eval_ctx.volatile or self.node is None:
raise Impossible()
# we have to be careful here because we call filter_ below.
# if this variable would be called filter, 2to3 would wrap the
        # call in a list because it is assuming we are talking about the
# builtin filter function here which no longer returns a list in
# python 3. because of that, do not rename filter_ to filter!
filter_ = self.environment.filters.get(self.name)
if filter_ is None or getattr(filter_, 'contextfilter', False):
raise Impossible()
obj = self.node.as_const(eval_ctx)
args = [x.as_const(eval_ctx) for x in self.args]
if getattr(filter_, 'evalcontextfilter', False):
args.insert(0, eval_ctx)
elif getattr(filter_, 'environmentfilter', False):
args.insert(0, self.environment)
kwargs = dict(x.as_const(eval_ctx) for x in self.kwargs)
if self.dyn_args is not None:
try:
args.extend(self.dyn_args.as_const(eval_ctx))
except Exception:
raise Impossible()
if self.dyn_kwargs is not None:
try:
kwargs.update(self.dyn_kwargs.as_const(eval_ctx))
except Exception:
raise Impossible()
try:
return filter_(obj, *args, **kwargs)
except Exception:
raise Impossible()
class Test(Expr):
"""Applies a test on an expression. `name` is the name of the test, the
rest of the fields are the same as for :class:`Call`.
"""
fields = ('node', 'name', 'args', 'kwargs', 'dyn_args', 'dyn_kwargs')
class Call(Expr):
"""Calls an expression. `args` is a list of arguments, `kwargs` a list
of keyword arguments (list of :class:`Keyword` nodes), and `dyn_args`
and `dyn_kwargs` has to be either `None` or a node that is used as
node for dynamic positional (``*args``) or keyword (``**kwargs``)
arguments.
"""
fields = ('node', 'args', 'kwargs', 'dyn_args', 'dyn_kwargs')
def as_const(self, eval_ctx=None):
eval_ctx = get_eval_context(self, eval_ctx)
if eval_ctx.volatile:
raise Impossible()
obj = self.node.as_const(eval_ctx)
# don't evaluate context functions
args = [x.as_const(eval_ctx) for x in self.args]
if isinstance(obj, _context_function_types):
if getattr(obj, 'contextfunction', False):
raise Impossible()
elif getattr(obj, 'evalcontextfunction', False):
args.insert(0, eval_ctx)
elif getattr(obj, 'environmentfunction', False):
args.insert(0, self.environment)
kwargs = dict(x.as_const(eval_ctx) for x in self.kwargs)
if self.dyn_args is not None:
try:
args.extend(self.dyn_args.as_const(eval_ctx))
except Exception:
raise Impossible()
if self.dyn_kwargs is not None:
try:
kwargs.update(self.dyn_kwargs.as_const(eval_ctx))
except Exception:
raise Impossible()
try:
return obj(*args, **kwargs)
except Exception:
raise Impossible()
class Getitem(Expr):
"""Get an attribute or item from an expression and prefer the item."""
fields = ('node', 'arg', 'ctx')
def as_const(self, eval_ctx=None):
eval_ctx = get_eval_context(self, eval_ctx)
if self.ctx != 'load':
raise Impossible()
try:
return self.environment.getitem(self.node.as_const(eval_ctx),
self.arg.as_const(eval_ctx))
except Exception:
raise Impossible()
def can_assign(self):
return False
class Getattr(Expr):
"""Get an attribute or item from an expression that is a ascii-only
bytestring and prefer the attribute.
"""
fields = ('node', 'attr', 'ctx')
def as_const(self, eval_ctx=None):
if self.ctx != 'load':
raise Impossible()
try:
eval_ctx = get_eval_context(self, eval_ctx)
return self.environment.getattr(self.node.as_const(eval_ctx),
self.attr)
except Exception:
raise Impossible()
def can_assign(self):
return False
class Slice(Expr):
"""Represents a slice object. This must only be used as argument for
:class:`Subscript`.
"""
fields = ('start', 'stop', 'step')
def as_const(self, eval_ctx=None):
eval_ctx = get_eval_context(self, eval_ctx)
def const(obj):
if obj is None:
return None
return obj.as_const(eval_ctx)
return slice(const(self.start), const(self.stop), const(self.step))
class Concat(Expr):
"""Concatenates the list of expressions provided after converting them to
unicode.
"""
fields = ('nodes',)
def as_const(self, eval_ctx=None):
eval_ctx = get_eval_context(self, eval_ctx)
return ''.join(text_type(x.as_const(eval_ctx)) for x in self.nodes)
class Compare(Expr):
"""Compares an expression with some other expressions. `ops` must be a
list of :class:`Operand`\s.
"""
fields = ('expr', 'ops')
def as_const(self, eval_ctx=None):
eval_ctx = get_eval_context(self, eval_ctx)
result = value = self.expr.as_const(eval_ctx)
try:
for op in self.ops:
new_value = op.expr.as_const(eval_ctx)
result = _cmpop_to_func[op.op](value, new_value)
value = new_value
except Exception:
raise Impossible()
return result
class Operand(Helper):
"""Holds an operator and an expression."""
fields = ('op', 'expr')
if __debug__:
Operand.__doc__ += '\nThe following operators are available: ' + \
', '.join(sorted('``%s``' % x for x in set(_binop_to_func) |
set(_uaop_to_func) | set(_cmpop_to_func)))
class Mul(BinExpr):
"""Multiplies the left with the right node."""
operator = '*'
class Div(BinExpr):
"""Divides the left by the right node."""
operator = '/'
class FloorDiv(BinExpr):
"""Divides the left by the right node and truncates conver the
result into an integer by truncating.
"""
operator = '//'
class Add(BinExpr):
"""Add the left to the right node."""
operator = '+'
class Sub(BinExpr):
"""Substract the right from the left node."""
operator = '-'
class Mod(BinExpr):
"""Left modulo right."""
operator = '%'
class Pow(BinExpr):
"""Left to the power of right."""
operator = '**'
class And(BinExpr):
"""Short circuited AND."""
operator = 'and'
def as_const(self, eval_ctx=None):
eval_ctx = get_eval_context(self, eval_ctx)
return self.left.as_const(eval_ctx) and self.right.as_const(eval_ctx)
class Or(BinExpr):
"""Short circuited OR."""
operator = 'or'
def as_const(self, eval_ctx=None):
eval_ctx = get_eval_context(self, eval_ctx)
return self.left.as_const(eval_ctx) or self.right.as_const(eval_ctx)
class Not(UnaryExpr):
"""Negate the expression."""
operator = 'not'
class Neg(UnaryExpr):
"""Make the expression negative."""
operator = '-'
class Pos(UnaryExpr):
"""Make the expression positive (noop for most expressions)"""
operator = '+'
# Helpers for extensions
class EnvironmentAttribute(Expr):
"""Loads an attribute from the environment object. This is useful for
extensions that want to call a callback stored on the environment.
"""
fields = ('name',)
class ExtensionAttribute(Expr):
"""Returns the attribute of an extension bound to the environment.
The identifier is the identifier of the :class:`Extension`.
This node is usually constructed by calling the
:meth:`~jinja2.ext.Extension.attr` method on an extension.
"""
fields = ('identifier', 'name')
class ImportedName(Expr):
"""If created with an import name the import name is returned on node
access. For example ``ImportedName('cgi.escape')`` returns the `escape`
function from the cgi module on evaluation. Imports are optimized by the
compiler so there is no need to assign them to local variables.
"""
fields = ('importname',)
class InternalName(Expr):
"""An internal name in the compiler. You cannot create these nodes
yourself but the parser provides a
:meth:`~jinja2.parser.Parser.free_identifier` method that creates
a new identifier for you. This identifier is not available from the
    template and is not treated specially by the compiler.
"""
fields = ('name',)
def __init__(self):
raise TypeError('Can\'t create internal names. Use the '
'`free_identifier` method on a parser.')
class MarkSafe(Expr):
"""Mark the wrapped expression as safe (wrap it as `Markup`)."""
fields = ('expr',)
def as_const(self, eval_ctx=None):
eval_ctx = get_eval_context(self, eval_ctx)
return Markup(self.expr.as_const(eval_ctx))
class MarkSafeIfAutoescape(Expr):
"""Mark the wrapped expression as safe (wrap it as `Markup`) but
only if autoescaping is active.
.. versionadded:: 2.5
"""
fields = ('expr',)
def as_const(self, eval_ctx=None):
eval_ctx = get_eval_context(self, eval_ctx)
if eval_ctx.volatile:
raise Impossible()
expr = self.expr.as_const(eval_ctx)
if eval_ctx.autoescape:
return Markup(expr)
return expr
class ContextReference(Expr):
"""Returns the current template context. It can be used like a
:class:`Name` node, with a ``'load'`` ctx and will return the
current :class:`~jinja2.runtime.Context` object.
    Here is an example that assigns the current template name to a
variable named `foo`::
Assign(Name('foo', ctx='store'),
Getattr(ContextReference(), 'name'))
"""
class Continue(Stmt):
"""Continue a loop."""
class Break(Stmt):
"""Break a loop."""
class Scope(Stmt):
"""An artificial scope."""
fields = ('body',)
class EvalContextModifier(Stmt):
"""Modifies the eval context. For each option that should be modified,
a :class:`Keyword` has to be added to the :attr:`options` list.
Example to change the `autoescape` setting::
EvalContextModifier(options=[Keyword('autoescape', Const(True))])
"""
fields = ('options',)
class ScopedEvalContextModifier(EvalContextModifier):
"""Modifies the eval context and reverts it later. Works exactly like
:class:`EvalContextModifier` but will only modify the
:class:`~jinja2.nodes.EvalContext` for nodes in the :attr:`body`.
"""
fields = ('body',)
# make sure nobody creates custom nodes
def _failing_new(*args, **kwargs):
raise TypeError('can\'t create custom node types')
NodeType.__new__ = staticmethod(_failing_new); del _failing_new
| apache-2.0 | 8,431,153,268,877,897,000 | 30.57221 | 81 | 0.600582 | false |
benEnsta/pyIbex | pyibex/thickset/geoimage_utils.py | 1 | 1738 | from pyibex import IntervalVector, Interval
from pyibex.thickset import * #PavingVisitor, IN, OUT, MAYBE_IN, MAYBE_OUT, UNK, MAYBE, EMPTY, ThickPaving, ThickDisk, ToVibes
from pyibex.thickset.thickimage import GeoImage, GeoMapper, ThickGeoImage
import numpy as np
import math
def grayToThickImg(gray_img, in_val=255, maybe_val=128, out_val=0 ):
""" Compute Integral image to X- and X+ sets
Args:
        gray_img( np.array ): gray image with only three values
255 : pixels which belong to X-
128 : pixels which belong to X+
0 : pixels outside X+
"""
img_in = np.zeros(gray_img.shape, dtype=np.uint64)
img_in[gray_img == in_val] = 1
img_in = img_in.cumsum(0).cumsum(1)
img_out = np.zeros(gray_img.shape, dtype=np.uint64)
img_out[gray_img >= maybe_val] = 1
img_out = img_out.cumsum(0).cumsum(1)
return (img_in, img_out)
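# Illustrative sketch (not part of the original module): run the helper on a
# tiny synthetic mask.  img_in accumulates only the 255 pixels (X-), while
# img_out accumulates every pixel >= 128 (X+).
def _demo_gray_to_thick_img():
    gray = np.array([[255, 128],
                     [0, 255]], dtype=np.uint8)
    img_in, img_out = grayToThickImg(gray)
    return img_in, img_out   # two cumulative-sum (integral) images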
#
# if __name__ == '__main__':
#
# from vibes import vibes
# res = 0.002
# t = ThickDisk(Interval(0), Interval(0), Interval(0, 2), Interval(0,2))
# X0 = IntervalVector(2, [-10,10])
# P = ThickPaving(IntervalVector(2, [-10,10]), t, 0.01, opInter)
# # exportAsPNG(P, 0.01, -0.01, "test2.png")
# vibes.beginDrawing()
#
# thickTest2Img(t, X0, res, -res, "test2.png")
#
#
# P.visit(ToVibes(1000, 'test'))
# # exportAsPNG(P,0.005, -0.005)
# #
# img = misc.imread("test2.png", 0)
# img_in, img_out = grayToThickImg(img)
# timg = ThickGeoImage(img_in, img_out, -10,10, res, -res)
# #
# test = lambda x: opRestrict(timg.test(x), timg.test(x))
# test = lambda x: opRestrict(t.test(x), timg.test(x))
# P = ThickPaving(IntervalVector(2, [-10,10]), test, 0.01, opInter)
# # P.Sivia(t, 0.005, opRestrict)
# P.visit(ToVibes(1000, 'test2'))
#
# vibes.endDrawing()
| lgpl-3.0 | 9,104,594,568,389,917,000 | 30.035714 | 126 | 0.644994 | false |
RealTimeWeb/geocode | python/docs/conf.py | 1 | 8103 | # -*- coding: utf-8 -*-
#
# earthquakeservice documentation build configuration file, created by
# sphinx-quickstart on Wed Jul 31 13:27:50 2013.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
sys.path.insert(0, os.path.abspath('.'))
sys.path.insert(0, os.path.abspath('../geocode/'))
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'geocode'
copyright = u'2014, acbart'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.1'
# The full version, including alpha/beta/rc tags.
release = '0.1'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'earthquakeservicedoc'
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'earthquakeservice.tex', u'earthquakeservice Documentation',
u'acbart', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'earthquakeservice', u'earthquakeservice Documentation',
[u'acbart'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'earthquakeservice', u'earthquakeservice Documentation',
u'acbart', 'earthquakeservice', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
| mit | 5,218,196,954,392,506,000 | 31.412 | 80 | 0.707146 | false |
DEGoodmanWilson/bacon | plugins/pdf.py | 1 | 4403 | #note that this plugin assumes that posts will be in HTML or XHTML format...
from Plugin import Plugin
import sys
import Config
from HTMLParser import HTMLParser
import string
image_base = '/home/dgoodman/www'
class StoryParser(HTMLParser):
def __init__(self):
HTMLParser.__init__(self)
self.parsedbuffer = ''
def reset(self):
HTMLParser.reset(self)
self.parsedbuffer = ''
def translate(self, data):
data = string.replace(data, '&', '&')
data = string.replace(data, '&', '\\&')
return data
def handle_entityref(self, e):
if e == 'amp': self.parsedbuffer += '\\&'
elif e == 'mdash': self.parsedbuffer += '--'
def handle_charref(self, e):
if e == '36': self.parsedbuffer += '\\$'
def handle_starttag(self, tag, attrs):
r = ''
if tag == 'q':
r = "``"
elif tag == 'p':
r = ''
elif tag == 'ul':
r = '\\begin{itemize}\n'
elif tag == 'ol':
r = '\\begin{enumerate}\n'
elif tag == 'li':
r = '\\item '
elif tag == 'em' or tag == 'i':
r = '\\textit{'
elif tag == 'strong' or tag == 'bold':
r = '\\textbf{'
elif tag == 'a':
for a in attrs:
if a[0] == 'href':
self.href = a[1]
if self.href[0] == '/': self.href = Config.url+self.href
r = '\\href{'+self.href+'}{'
elif tag == 'img':
for a in attrs:
if a[0] == 'src':
src = a[1]
if src[0] == '/':
r += '\\includegraphics[]{'+image_base+src+'}'
self.parsedbuffer+=r
def handle_startendtag(self, tag, attrs):
if tag == 'img':
for a in attrs:
if a[0] == 'src':
src = a[1]
if src[0] == '/':
self.parsedbuffer += '\\includegraphics[]{'+image_base+src+'}'
def handle_endtag(self, tag):
r = ''
if tag == 'q':
r = "''"
elif tag == 'p':
r = '\n'
elif tag == 'ul':
r = '\\end{itemize}'
elif tag == 'ol':
r = '\\end{enumerate}'
elif tag == 'li':
r = '\n'
        elif tag in ('em', 'i', 'strong', 'bold'):
            r = '}'
elif tag == 'a':
r = '}\\footnote{\url{'+self.translate(self.href)+'}}'
self.parsedbuffer+=r
def handle_data(self, data):
#catch rogue ampersands
data = data.replace("&", "\\&")
self.parsedbuffer+=data
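# Illustrative sketch (not part of the original plugin): feeding a small HTML
# fragment through StoryParser yields LaTeX markup in `parsedbuffer`.
def _demo_story_parser():
    p = StoryParser()
    p.feed('<p>See <em>this</em> &amp; <q>that</q></p>')
    latex = p.parsedbuffer   # roughly: See \textit{this} \& ``that''
    p.close()
    return latex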
class pdf(Plugin):
type = "pdf"
def __init__(self, form, blog):
self.active = False
self.blog = blog
if blog.flavour == 'pdf':
self.active = True
import commands
self.pdflatex = commands.getoutput('which pdflatex')
if 'no pdflatex in' in self.pdflatex:
self.active = False
return
import os.path, os
self.uid = commands.getoutput('uuidgen')
self.path = os.path.join(Config.plugin_state_path, self.type)
if not os.path.exists(self.path):
os.makedirs(self.path)
self.outfile = file(os.path.join(self.path,self.uid+'.latex'), 'w')
self.tempout = sys.stdout
sys.stdout = self.outfile
self.toremove = []
def postLoadContent(self):
if self.active == True:
#TODO processes stories here
for story in self.blog.children['stories']:
p = StoryParser()
p.feed(story.title)
story.title = p.parsedbuffer
p.close()
q = StoryParser()
q.feed(story.body)
story.body = q.parsedbuffer
q.close()
#TODO process children here....
def postRender(self):
if self.active == True:
import os
import commands
self.outfile.close()
sys.stdout = self.tempout
try:
commands.getoutput('cd '+self.path+';'+self.pdflatex+' '+self.uid+'.latex')
except:
os.remove(os.path.join(self.path,self.uid+'.latex'))
return
infile = file(os.path.join(self.path,self.uid+'.pdf'))
lines = infile.readlines()
infile.close()
for line in lines:
sys.stdout.write(line)
os.remove(os.path.join(self.path,self.uid+'.latex'))
os.remove(os.path.join(self.path,self.uid+'.aux'))
os.remove(os.path.join(self.path,self.uid+'.log'))
os.remove(os.path.join(self.path,self.uid+'.out'))
os.remove(os.path.join(self.path,self.uid+'.pdf'))
#TODO remove image files, if any
__pdf = pdf(opts['form'], blog)
plugins.append(__pdf)
| gpl-2.0 | -1,929,480,661,829,009,000 | 27.967105 | 83 | 0.546218 | false |
shoyer/xray | xarray/tutorial.py | 1 | 2729 | '''
Useful for:
* users learning xarray
* building tutorials in the documentation.
'''
import hashlib
import os as _os
from urllib.request import urlretrieve
from .backends.api import open_dataset as _open_dataset
_default_cache_dir = _os.sep.join(('~', '.xarray_tutorial_data'))
def file_md5_checksum(fname):
hash_md5 = hashlib.md5()
with open(fname, "rb") as f:
hash_md5.update(f.read())
return hash_md5.hexdigest()
# idea borrowed from Seaborn
def open_dataset(name, cache=True, cache_dir=_default_cache_dir,
github_url='https://github.com/pydata/xarray-data',
branch='master', **kws):
"""
Open a dataset from the online repository (requires internet).
If a local copy is found then always use that to avoid network traffic.
Parameters
----------
name : str
Name of the netcdf file containing the dataset
        e.g. 'air_temperature'
cache_dir : string, optional
The directory in which to search for and write cached data.
cache : boolean, optional
If True, then cache data locally for use on subsequent calls
github_url : string
Github repository where the data is stored
branch : string
The git branch to download from
kws : dict, optional
Passed to xarray.open_dataset
See Also
--------
xarray.open_dataset
"""
longdir = _os.path.expanduser(cache_dir)
fullname = name + '.nc'
localfile = _os.sep.join((longdir, fullname))
md5name = name + '.md5'
md5file = _os.sep.join((longdir, md5name))
if not _os.path.exists(localfile):
# This will always leave this directory on disk.
# May want to add an option to remove it.
if not _os.path.isdir(longdir):
_os.mkdir(longdir)
url = '/'.join((github_url, 'raw', branch, fullname))
urlretrieve(url, localfile)
url = '/'.join((github_url, 'raw', branch, md5name))
urlretrieve(url, md5file)
localmd5 = file_md5_checksum(localfile)
with open(md5file, 'r') as f:
remotemd5 = f.read()
if localmd5 != remotemd5:
_os.remove(localfile)
msg = """
MD5 checksum does not match, try downloading dataset again.
"""
raise IOError(msg)
ds = _open_dataset(localfile, **kws)
if not cache:
ds = ds.load()
_os.remove(localfile)
return ds
def load_dataset(*args, **kwargs):
"""
Open, load into memory, and close a dataset from the online repository
(requires internet).
See Also
--------
open_dataset
"""
with open_dataset(*args, **kwargs) as ds:
return ds.load()
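# Minimal usage sketch (not part of the original module).  Assumes network
# access on first use; 'air_temperature' is the example dataset name
# mentioned in the docstring above.
if __name__ == '__main__':
    ds = load_dataset('air_temperature')
    print(ds)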
| apache-2.0 | 7,904,196,227,685,292,000 | 26.019802 | 75 | 0.608281 | false |
supertom/ansible-modules-core | network/nxos/nxos_ospf_vrf.py | 10 | 20754 | #!/usr/bin/python
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
DOCUMENTATION = '''
---
module: nxos_ospf_vrf
version_added: "2.2"
short_description: Manages a VRF for an OSPF router.
description:
- Manages a VRF for an OSPF router.
author: Gabriele Gerbino (@GGabriele)
extends_documentation_fragment: nxos
notes:
- Value I(default) restores params default value, if any.
Otherwise it removes the existing param configuration.
options:
vrf:
description:
- Name of the resource instance. Valid value is a string.
The name 'default' is a valid VRF representing the global OSPF.
required: false
default: default
ospf:
description:
- Name of the OSPF instance.
required: true
default: null
router_id:
description:
- Router Identifier (ID) of the OSPF router VRF instance.
required: false
default: null
default_metric:
description:
- Specify the default Metric value. Valid values are an integer
or the keyword 'default'.
required: false
default: null
log_adjacency:
description:
- Controls the level of log messages generated whenever a
neighbor changes state. Valid values are 'log', 'detail',
and 'default'.
required: false
choices: ['log','detail','default']
default: null
timer_throttle_lsa_start:
description:
- Specify the start interval for rate-limiting Link-State
Advertisement (LSA) generation. Valid values are an integer,
in milliseconds, or the keyword 'default'.
required: false
default: null
timer_throttle_lsa_hold:
description:
- Specify the hold interval for rate-limiting Link-State
Advertisement (LSA) generation. Valid values are an integer,
in milliseconds, or the keyword 'default'.
required: false
default: null
timer_throttle_lsa_max:
description:
- Specify the max interval for rate-limiting Link-State
Advertisement (LSA) generation. Valid values are an integer,
in milliseconds, or the keyword 'default'.
required: false
default: null
timer_throttle_spf_start:
description:
- Specify initial Shortest Path First (SPF) schedule delay.
Valid values are an integer, in milliseconds, or
the keyword 'default'.
required: false
default: null
timer_throttle_spf_hold:
description:
- Specify minimum hold time between Shortest Path First (SPF)
calculations. Valid values are an integer, in milliseconds,
or the keyword 'default'.
required: false
default: null
timer_throttle_spf_max:
description:
- Specify the maximum wait time between Shortest Path First (SPF)
calculations. Valid values are an integer, in milliseconds,
or the keyword 'default'.
required: false
default: null
auto_cost:
description:
- Specifies the reference bandwidth used to assign OSPF cost.
Valid values are an integer, in Mbps, or the keyword 'default'.
required: false
default: null
'''
EXAMPLES = '''
- nxos_ospf_vrf:
ospf: 1
timer_throttle_spf_start: 50
timer_throttle_spf_hold: 1000
timer_throttle_spf_max: 2000
timer_throttle_lsa_start: 60
timer_throttle_lsa_hold: 1100
timer_throttle_lsa_max: 3000
vrf: test
state: present
username: "{{ un }}"
password: "{{ pwd }}"
host: "{{ inventory_hostname }}"
'''
RETURN = '''
proposed:
description: k/v pairs of parameters passed into module
returned: verbose mode
type: dict
sample: {"ospf": "1", "timer_throttle_lsa_hold": "1100",
"timer_throttle_lsa_max": "3000", "timer_throttle_lsa_start": "60",
"timer_throttle_spf_hold": "1000",
"timer_throttle_spf_max": "2000", "timer_throttle_spf_start": "50",
"vrf": "test"}
existing:
description: k/v pairs of existing configuration
returned: verbose mode
type: dict
sample: {"auto_cost": "40000", "default_metric": "", "log_adjacency": "",
"ospf": "1", "router_id": "", "timer_throttle_lsa_hold": "5000",
"timer_throttle_lsa_max": "5000", "timer_throttle_lsa_start": "0",
"timer_throttle_spf_hold": "1000",
"timer_throttle_spf_max": "5000",
"timer_throttle_spf_start": "200", "vrf": "test"}
end_state:
description: k/v pairs of configuration after module execution
returned: verbose mode
type: dict
sample: {"auto_cost": "40000", "default_metric": "", "log_adjacency": "",
"ospf": "1", "router_id": "", "timer_throttle_lsa_hold": "1100",
"timer_throttle_lsa_max": "3000", "timer_throttle_lsa_start": "60",
"timer_throttle_spf_hold": "1000",
"timer_throttle_spf_max": "2000", "timer_throttle_spf_start": "50",
"vrf": "test"}
updates:
description: commands sent to the device
returned: always
type: list
sample: ["router ospf 1", "vrf test", "timers throttle lsa 60 1100 3000",
"timers throttle spf 50 1000 2000"]
changed:
description: check to see if a change was made on the device
returned: always
type: boolean
sample: true
'''
# COMMON CODE FOR MIGRATION
import re
from ansible.module_utils.basic import get_exception
from ansible.module_utils.netcfg import NetworkConfig, ConfigLine
try:
from ansible.module_utils.nxos import get_module
except ImportError:
from ansible.module_utils.nxos import NetworkModule
def to_list(val):
if isinstance(val, (list, tuple)):
return list(val)
elif val is not None:
return [val]
else:
return list()
class CustomNetworkConfig(NetworkConfig):
def expand_section(self, configobj, S=None):
if S is None:
S = list()
S.append(configobj)
for child in configobj.children:
if child in S:
continue
self.expand_section(child, S)
return S
def get_object(self, path):
for item in self.items:
if item.text == path[-1]:
parents = [p.text for p in item.parents]
if parents == path[:-1]:
return item
def to_block(self, section):
return '\n'.join([item.raw for item in section])
def get_section(self, path):
try:
section = self.get_section_objects(path)
return self.to_block(section)
except ValueError:
return list()
def get_section_objects(self, path):
if not isinstance(path, list):
path = [path]
obj = self.get_object(path)
if not obj:
raise ValueError('path does not exist in config')
return self.expand_section(obj)
def add(self, lines, parents=None):
"""Adds one or lines of configuration
"""
ancestors = list()
offset = 0
obj = None
## global config command
if not parents:
for line in to_list(lines):
item = ConfigLine(line)
item.raw = line
if item not in self.items:
self.items.append(item)
else:
for index, p in enumerate(parents):
try:
i = index + 1
obj = self.get_section_objects(parents[:i])[0]
ancestors.append(obj)
except ValueError:
# add parent to config
offset = index * self.indent
obj = ConfigLine(p)
obj.raw = p.rjust(len(p) + offset)
if ancestors:
obj.parents = list(ancestors)
ancestors[-1].children.append(obj)
self.items.append(obj)
ancestors.append(obj)
# add child objects
for line in to_list(lines):
# check if child already exists
for child in ancestors[-1].children:
if child.text == line:
break
else:
offset = len(parents) * self.indent
item = ConfigLine(line)
item.raw = line.rjust(len(line) + offset)
item.parents = ancestors
ancestors[-1].children.append(item)
self.items.append(item)
def get_network_module(**kwargs):
try:
return get_module(**kwargs)
except NameError:
return NetworkModule(**kwargs)
def get_config(module, include_defaults=False):
config = module.params['config']
if not config:
try:
config = module.get_config()
except AttributeError:
defaults = module.params['include_defaults']
config = module.config.get_config(include_defaults=defaults)
return CustomNetworkConfig(indent=2, contents=config)
def load_config(module, candidate):
config = get_config(module)
commands = candidate.difference(config)
commands = [str(c).strip() for c in commands]
save_config = module.params['save']
result = dict(changed=False)
if commands:
if not module.check_mode:
try:
module.configure(commands)
except AttributeError:
module.config(commands)
if save_config:
try:
module.config.save_config()
except AttributeError:
module.execute(['copy running-config startup-config'])
result['changed'] = True
result['updates'] = commands
return result
# END OF COMMON CODE
PARAM_TO_COMMAND_KEYMAP = {
'router_id': 'router-id',
'default_metric': 'default-metric',
'log_adjacency': 'log-adjacency-changes',
'timer_throttle_lsa_start': 'timers throttle lsa',
'timer_throttle_lsa_max': 'timers throttle lsa',
'timer_throttle_lsa_hold': 'timers throttle lsa',
'timer_throttle_spf_max': 'timers throttle spf',
'timer_throttle_spf_start': 'timers throttle spf',
'timer_throttle_spf_hold': 'timers throttle spf',
'auto_cost': 'auto-cost reference-bandwidth'
}
PARAM_TO_DEFAULT_KEYMAP = {
'timer_throttle_lsa_start': '0',
'timer_throttle_lsa_max': '5000',
'timer_throttle_lsa_hold': '5000',
'timer_throttle_spf_start': '200',
'timer_throttle_spf_max': '5000',
'timer_throttle_spf_hold': '1000',
'auto_cost': '40000'
}
def invoke(name, *args, **kwargs):
func = globals().get(name)
if func:
return func(*args, **kwargs)
def get_value(arg, config, module):
REGEX = re.compile(r'(?:{0}\s)(?P<value>.*)$'.format(PARAM_TO_COMMAND_KEYMAP[arg]), re.M)
value = ''
if PARAM_TO_COMMAND_KEYMAP[arg] in config:
if arg == 'log_adjacency':
if 'log-adjacency-changes detail' in config:
value = 'detail'
else:
value = 'log'
else:
value_list = REGEX.search(config).group('value').split()
if 'hold' in arg:
value = value_list[1]
elif 'max' in arg:
value = value_list[2]
elif 'auto' in arg:
if 'Gbps' in value_list:
value = str(int(value_list[0]) * 1000)
else:
value = value_list[0]
else:
value = value_list[0]
return value
def get_existing(module, args):
existing = {}
netcfg = get_config(module)
parents = ['router ospf {0}'.format(module.params['ospf'])]
if module.params['vrf'] != 'default':
parents.append('vrf {0}'.format(module.params['vrf']))
config = netcfg.get_section(parents)
if config:
if module.params['vrf'] == 'default':
splitted_config = config.splitlines()
vrf_index = False
for index in range(0, len(splitted_config) - 1):
if 'vrf' in splitted_config[index].strip():
vrf_index = index
break
if vrf_index:
config = '\n'.join(splitted_config[0:vrf_index])
for arg in args:
if arg not in ['ospf', 'vrf']:
existing[arg] = get_value(arg, config, module)
existing['vrf'] = module.params['vrf']
existing['ospf'] = module.params['ospf']
return existing
def apply_key_map(key_map, table):
new_dict = {}
for key, value in table.items():
new_key = key_map.get(key)
if new_key:
value = table.get(key)
if value:
new_dict[new_key] = value
else:
new_dict[new_key] = value
return new_dict
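# Illustrative sketch (not part of the original module): apply_key_map simply
# renames proposed-parameter keys to their CLI keywords; the values used here
# are made up.
def _demo_apply_key_map():
    table = {'router_id': '10.1.1.1', 'default_metric': '50'}
    return apply_key_map(PARAM_TO_COMMAND_KEYMAP, table)
    # -> {'router-id': '10.1.1.1', 'default-metric': '50'}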
def state_present(module, existing, proposed, candidate):
commands = list()
proposed_commands = apply_key_map(PARAM_TO_COMMAND_KEYMAP, proposed)
existing_commands = apply_key_map(PARAM_TO_COMMAND_KEYMAP, existing)
for key, value in proposed_commands.iteritems():
if value is True:
commands.append(key)
elif value is False:
commands.append('no {0}'.format(key))
elif value == 'default':
if existing_commands.get(key):
existing_value = existing_commands.get(key)
commands.append('no {0} {1}'.format(key, existing_value))
else:
if key == 'timers throttle lsa':
command = '{0} {1} {2} {3}'.format(
key,
proposed['timer_throttle_lsa_start'],
proposed['timer_throttle_lsa_hold'],
proposed['timer_throttle_lsa_max'])
elif key == 'timers throttle spf':
command = '{0} {1} {2} {3}'.format(
key,
proposed['timer_throttle_spf_start'],
proposed['timer_throttle_spf_hold'],
proposed['timer_throttle_spf_max'])
elif key == 'log-adjacency-changes':
if value == 'log':
command = key
elif value == 'detail':
command = '{0} {1}'.format(key, value)
elif key == 'auto-cost reference-bandwidth':
if len(value) < 5:
command = '{0} {1} Mbps'.format(key, value)
else:
value = str(int(value) / 1000)
command = '{0} {1} Gbps'.format(key, value)
else:
command = '{0} {1}'.format(key, value.lower())
if command not in commands:
commands.append(command)
if commands:
parents = ['router ospf {0}'.format(module.params['ospf'])]
if module.params['vrf'] != 'default':
parents.append('vrf {0}'.format(module.params['vrf']))
candidate.add(commands, parents=parents)
def state_absent(module, existing, proposed, candidate):
commands = []
parents = ['router ospf {0}'.format(module.params['ospf'])]
if module.params['vrf'] == 'default':
existing_commands = apply_key_map(PARAM_TO_COMMAND_KEYMAP, existing)
for key, value in existing_commands.iteritems():
if value:
if key == 'timers throttle lsa':
command = 'no {0} {1} {2} {3}'.format(
key,
existing['timer_throttle_lsa_start'],
existing['timer_throttle_lsa_hold'],
existing['timer_throttle_lsa_max'])
elif key == 'timers throttle spf':
command = 'no {0} {1} {2} {3}'.format(
key,
existing['timer_throttle_spf_start'],
existing['timer_throttle_spf_hold'],
existing['timer_throttle_spf_max'])
else:
existing_value = existing_commands.get(key)
command = 'no {0} {1}'.format(key, existing_value)
if command not in commands:
commands.append(command)
else:
commands = ['no vrf {0}'.format(module.params['vrf'])]
candidate.add(commands, parents=parents)
def main():
argument_spec = dict(
vrf=dict(required=False, type='str', default='default'),
ospf=dict(required=True, type='str'),
router_id=dict(required=False, type='str'),
default_metric=dict(required=False, type='str'),
log_adjacency=dict(required=False, type='str',
choices=['log', 'detail', 'default']),
timer_throttle_lsa_start=dict(required=False, type='str'),
timer_throttle_lsa_hold=dict(required=False, type='str'),
timer_throttle_lsa_max=dict(required=False, type='str'),
timer_throttle_spf_start=dict(required=False, type='str'),
timer_throttle_spf_hold=dict(required=False, type='str'),
timer_throttle_spf_max=dict(required=False, type='str'),
auto_cost=dict(required=False, type='str'),
state=dict(choices=['present', 'absent'], default='present',
required=False),
include_defaults=dict(default=True),
config=dict(),
save=dict(type='bool', default=False)
)
module = get_network_module(argument_spec=argument_spec,
supports_check_mode=True)
state = module.params['state']
args = [
'vrf',
'ospf',
'router_id',
'default_metric',
'log_adjacency',
'timer_throttle_lsa_start',
'timer_throttle_lsa_hold',
'timer_throttle_lsa_max',
'timer_throttle_spf_start',
'timer_throttle_spf_hold',
'timer_throttle_spf_max',
'auto_cost'
]
existing = invoke('get_existing', module, args)
end_state = existing
proposed_args = dict((k, v) for k, v in module.params.iteritems()
if v is not None and k in args)
proposed = {}
for key, value in proposed_args.iteritems():
if key != 'interface':
if str(value).lower() == 'true':
value = True
elif str(value).lower() == 'false':
value = False
elif str(value).lower() == 'default':
value = PARAM_TO_DEFAULT_KEYMAP.get(key)
if value is None:
value = 'default'
if existing.get(key) or (not existing.get(key) and value):
proposed[key] = value
result = {}
if state == 'present' or (state == 'absent' and existing):
candidate = CustomNetworkConfig(indent=3)
invoke('state_%s' % state, module, existing, proposed, candidate)
try:
response = load_config(module, candidate)
result.update(response)
except ShellError:
exc = get_exception()
module.fail_json(msg=str(exc))
else:
result['updates'] = []
result['connected'] = module.connected
if module._verbosity > 0:
end_state = invoke('get_existing', module, args)
result['end_state'] = end_state
result['existing'] = existing
result['proposed'] = proposed_args
module.exit_json(**result)
if __name__ == '__main__':
main()
| gpl-3.0 | 7,463,217,349,500,837,000 | 34.116751 | 93 | 0.547413 | false |
appknox/AFE | internals/lib/basecmd.py | 3 | 2893 | #!/usr/bin/env python
# encoding: utf-8
import cmd
import os
import sys
import readline
import rlcompleter
if 'libedit' in readline.__doc__:
readline.parse_and_bind("bind ^I rl_complete")
else:
readline.parse_and_bind("tab: complete")
class BaseCmd(cmd.Cmd):
def __init__(self, session):
cmd.Cmd.__init__(self)
self.ruler = "-"
self.doc_header = "Commands - type help <command> for more info"
self.session = session
self._hist = [] ## No history yet
self._locals = {} ## Initialize execution namespace for user
self._globals = {}
self.cmdline = None
## Command definitions to support Cmd object functionality ##
def do_help(self, args):
"""
Get help on commands
'help' or '?' with no arguments prints a list of commands for which help is available
'help <command>' or '? <command>' gives help on <command>
"""
## The only reason to define this method is for the help text in the doc string
cmd.Cmd.do_help(self, args)
## Override methods in Cmd object ##
def preloop(self):
"""
Initialization before prompting user for commands.
        Despite the claims in the Cmd documentation, Cmd.preloop() is not a stub.
"""
cmd.Cmd.preloop(self) ## sets up command completion
self._hist = [] ## No history yet
self._locals = {} ## Initialize execution namespace for user
self._globals = {}
def postloop(self):
"""
Take care of any unfinished business.
        Despite the claims in the Cmd documentation, Cmd.postloop() is not a stub.
"""
cmd.Cmd.postloop(self) ## Clean up command completion
def precmd(self, line):
"""
This method is called after the line has been input but before
        it has been interpreted. If you want to modify the input line
before execution (for example, variable substitution) do it here.
"""
self._hist += [ line.strip() ]
return line
def postcmd(self, stop, line):
"""
If you want to stop the console, return something that evaluates to true.
If you want to do some post command processing, do it here.
"""
return stop
def emptyline(self):
"""
Do nothing on empty input line
"""
pass
def default(self, line):
"""
Called on an input line when the command prefix is not recognized.
"""
print "Command not found\n"
def do_shell(self, args):
"""Pass command to a system shell when line begins with '!'"""
os.system(args)
def do_clear(self, line):
"""
This command clears the screen or the terminal window!
"""
if os.name == 'nt':
os.system('cls')
else:
os.system('clear')
def do_quit(self, line):
"""
This command exits to the terminal window!
"""
sys.exit(0)
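# Illustrative sketch (not part of the original module): a minimal interactive
# shell built on BaseCmd; `session` is whatever object the caller wants the
# commands to share.
if __name__ == '__main__':
    class DemoShell(BaseCmd):
        prompt = 'demo> '
    DemoShell(session=None).cmdloop()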
| gpl-3.0 | -4,520,084,536,532,713,000 | 28.824742 | 87 | 0.607328 | false |
faint32/MSpider | function/dbengine.py | 11 | 5628 | #!/usr/bin/env python
# coding:utf-8
# manning 2015-3-29
import os
import time
import sqlite3
import gevent
import hashlib
import Queue
def set_db_folder(name):
db_dir = os.getcwd() + '/database/'
db_name = time.strftime("%Y-%m-%d", time.localtime()) # 2015-01-20
folder = db_dir + str(db_name) + '-' + name[0]
if not os.path.exists(folder):
os.makedirs(folder)
return folder
def init_db(name,model):
folder = set_db_folder(name)
if model == 0:
try:
conn = sqlite3.connect(folder+'/complete.db')
sql_creat_table = '''
create table if not exists info(
id integer primary key autoincrement,
http_url varchar(256) DEFAULT NULL,
time varchar(50) DEFAULT NULL,
backup_1 varchar(30) DEFAULT NULL,
backup_2 varchar(30) DEFAULT NULL,
backup_3 varchar(30) DEFAULT NULL,
backup_4 varchar(30) DEFAULT NULL
)'''
conn.execute(sql_creat_table)
conn.close()
return [str(folder+'/complete.db'),'']
except Exception, e:
print '---'+str(e)+'--'
elif model == 1:
try:
conn = sqlite3.connect(folder+'/smart.db')
sql_creat_table = '''
create table if not exists info(
id integer primary key autoincrement,
http_url varchar(256) DEFAULT NULL,
time varchar(50) DEFAULT NULL,
backup_1 varchar(30) DEFAULT NULL,
backup_2 varchar(30) DEFAULT NULL,
backup_3 varchar(30) DEFAULT NULL,
backup_4 varchar(30) DEFAULT NULL
)'''
conn.execute(sql_creat_table)
conn.close()
return ['',str(folder+'/smart.db')]
except Exception, e:
print '---'+str(e)+'--'
elif model == 2:
try:
conn = sqlite3.connect(folder+'/complete.db')
sql_creat_table = '''
create table if not exists info(
id integer primary key autoincrement,
http_url varchar(256) DEFAULT NULL,
time varchar(50) DEFAULT NULL,
backup_1 varchar(30) DEFAULT NULL
)'''
conn.execute(sql_creat_table)
conn.close()
except Exception, e:
print '---'+str(e)+'--'
time.sleep(1)
try:
conn = sqlite3.connect(folder+'/smart.db')
sql_creat_table = '''
create table if not exists info(
id integer primary key autoincrement,
http_url varchar(256) DEFAULT NULL,
time varchar(50) DEFAULT NULL,
backup_1 varchar(30) DEFAULT NULL
)'''
conn.execute(sql_creat_table)
conn.close()
return [str(folder+'/complete.db'),str(folder+'/smart.db')]
except Exception, e:
print '---'+str(e)+'--'
def engine_db(dbname,complete_queue,smart_queue,model):
connlist = init_db(dbname,model)
#connlist[0] complete
#connlist[1] smart
if model == 0:#complete
conn1 = sqlite3.connect(connlist[0])
while True:
try:
if not complete_queue.empty() and not complete_queue.full():
t = complete_queue.get()
sql_insert_data = '''insert into info(http_url,time,backup_1) values ('%s', '%s', '%s')'''%(t.url,t.time,'')
conn1.execute(sql_insert_data)
conn1.commit()
except Exception, e:
print '---'+str(e)+'--'
pass
elif model == 1:#smart
conn2 = sqlite3.connect(connlist[1])
while True:
try:
if not smart_queue.empty() and not smart_queue.full():
t = smart_queue.get()
sql_insert_data = '''insert into info(http_url,time,backup_1) values ('%s', '%s', '%s')'''%(t.url,t.time,'')
conn2.execute(sql_insert_data)
conn2.commit()
except Exception, e:
print '---'+str(e)+'--'
pass
elif model == 2:#two
conn1 = sqlite3.connect(connlist[0])
conn2 = sqlite3.connect(connlist[1])
while True:
try:
if not complete_queue.empty() and not complete_queue.full():
t1 = complete_queue.get()
sql_insert_data = '''insert into info(http_url,time,backup_1) values ('%s', '%s', '%s')'''%(t1.url,t1.time,'')
conn1.execute(sql_insert_data)
conn1.commit()
except Exception, e:
print '---'+str(e)+'--'
pass
time.sleep(0.1)
try:
if not smart_queue.empty() and not smart_queue.full():
t2 = smart_queue.get()
sql_insert_data = '''insert into info(http_url,time,backup_1) values ('%s', '%s', '%s')'''%(t2.url,t2.time,'')
conn2.execute(sql_insert_data)
conn2.commit()
except Exception, e:
print '---'+str(e)+'--'
pass
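# Usage sketch (illustrative only, not part of MSpider): engine_db() blocks forever and
# expects queue items that expose .url and .time attributes, so it is normally run from a
# daemon thread (or greenlet) while the crawler fills the queues. The UrlRecord class and
# the 'spider' database name below are assumptions made for this example.
def demo_engine_db():
    import threading
    class UrlRecord(object):
        def __init__(self, url):
            self.url = url
            self.time = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
    complete_q = Queue.Queue()
    smart_q = Queue.Queue()
    # model 2 writes to both complete.db and smart.db
    writer = threading.Thread(target=engine_db, args=('spider', complete_q, smart_q, 2))
    writer.setDaemon(True)
    writer.start()
    complete_q.put(UrlRecord('http://www.example.com/'))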
if __name__ == "__main__":
    # init_db takes (name, model); model 2 (create both databases) is assumed here
    init_db('levt', 2) | gpl-2.0 | 3,505,176,095,380,545,000 | 38.090278 | 130 | 0.469616 | false
jordanemedlock/psychtruths | temboo/core/Library/Google/Plus/Domains/Circles/Get.py | 5 | 5627 | # -*- coding: utf-8 -*-
###############################################################################
#
# Get
# Retrieves a specific circle.
#
# Python versions 2.6, 2.7, 3.x
#
# Copyright 2014, Temboo Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
# either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
#
#
###############################################################################
from temboo.core.choreography import Choreography
from temboo.core.choreography import InputSet
from temboo.core.choreography import ResultSet
from temboo.core.choreography import ChoreographyExecution
import json
class Get(Choreography):
def __init__(self, temboo_session):
"""
Create a new instance of the Get Choreo. A TembooSession object, containing a valid
set of Temboo credentials, must be supplied.
"""
super(Get, self).__init__(temboo_session, '/Library/Google/Plus/Domains/Circles/Get')
def new_input_set(self):
return GetInputSet()
def _make_result_set(self, result, path):
return GetResultSet(result, path)
def _make_execution(self, session, exec_id, path):
return GetChoreographyExecution(session, exec_id, path)
class GetInputSet(InputSet):
"""
An InputSet with methods appropriate for specifying the inputs to the Get
Choreo. The InputSet object is used to specify input parameters when executing this Choreo.
"""
def set_AccessToken(self, value):
"""
Set the value of the AccessToken input for this Choreo. ((optional, string) A valid access token retrieved during the OAuth process. This is required unless you provide the ClientID, ClientSecret, and RefreshToken to generate a new access token.)
"""
super(GetInputSet, self)._set_input('AccessToken', value)
def set_Callback(self, value):
"""
Set the value of the Callback input for this Choreo. ((optional, string) Specifies a JavaScript function that will be passed the response data for using the API with JSONP.)
"""
super(GetInputSet, self)._set_input('Callback', value)
def set_CircleID(self, value):
"""
Set the value of the CircleID input for this Choreo. ((required, string) The ID of the circle to get.)
"""
super(GetInputSet, self)._set_input('CircleID', value)
def set_ClientID(self, value):
"""
Set the value of the ClientID input for this Choreo. ((conditional, string) The Client ID provided by Google. Required unless providing a valid AccessToken.)
"""
super(GetInputSet, self)._set_input('ClientID', value)
def set_ClientSecret(self, value):
"""
Set the value of the ClientSecret input for this Choreo. ((conditional, string) The Client Secret provided by Google. Required unless providing a valid AccessToken.)
"""
super(GetInputSet, self)._set_input('ClientSecret', value)
def set_Fields(self, value):
"""
Set the value of the Fields input for this Choreo. ((optional, string) Used to specify fields to include in a partial response. This can be used to reduce the amount of data returned. See documentation for syntax rules.)
"""
super(GetInputSet, self)._set_input('Fields', value)
def set_PrettyPrint(self, value):
"""
Set the value of the PrettyPrint input for this Choreo. ((optional, boolean) A flag used to pretty print the JSON response to make it more readable. Defaults to "true".)
"""
super(GetInputSet, self)._set_input('PrettyPrint', value)
def set_RefreshToken(self, value):
"""
Set the value of the RefreshToken input for this Choreo. ((conditional, string) An OAuth Refresh Token used to generate a new access token when the original token is expired. Required unless providing a valid AccessToken.)
"""
super(GetInputSet, self)._set_input('RefreshToken', value)
def set_UserIP(self, value):
"""
Set the value of the UserIP input for this Choreo. ((optional, string) Identifies the IP address of the end user for whom the API call is being made. Used to enforce per-user quotas.)
"""
super(GetInputSet, self)._set_input('UserIP', value)
class GetResultSet(ResultSet):
"""
A ResultSet with methods tailored to the values returned by the Get Choreo.
The ResultSet object is used to retrieve the results of a Choreo execution.
"""
def getJSONFromString(self, str):
return json.loads(str)
def get_Response(self):
"""
Retrieve the value for the "Response" output from this Choreo execution. ((json) The response from Google.)
"""
return self._output.get('Response', None)
def get_NewAccessToken(self):
"""
Retrieve the value for the "NewAccessToken" output from this Choreo execution. ((string) Contains a new AccessToken when the RefreshToken is provided.)
"""
return self._output.get('NewAccessToken', None)
class GetChoreographyExecution(ChoreographyExecution):
def _make_result_set(self, response, path):
return GetResultSet(response, path)
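# --- Usage sketch (illustrative only, not generated by Temboo) ---
# The account name, app key, access token, and circle ID below are placeholders; the
# new_input_set()/execute_with_results() flow is the standard Temboo SDK pattern and is
# assumed to apply unchanged to this Choreo.
if __name__ == '__main__':
    from temboo.core.session import TembooSession
    session = TembooSession('ACCOUNT_NAME', 'APP_KEY_NAME', 'APP_KEY_VALUE')
    get_choreo = Get(session)
    get_inputs = get_choreo.new_input_set()
    get_inputs.set_AccessToken('YOUR_OAUTH_ACCESS_TOKEN')
    get_inputs.set_CircleID('TARGET_CIRCLE_ID')
    get_results = get_choreo.execute_with_results(get_inputs)
    print(get_results.get_Response())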
| apache-2.0 | -726,109,733,821,143,900 | 43.307087 | 254 | 0.667141 | false |