repo_name (stringlengths 5-100) | path (stringlengths 4-375) | copies (stringclasses, 991 values) | size (stringlengths 4-7) | content (stringlengths 666-1M) | license (stringclasses, 15 values)
---|---|---|---|---|---|
victorbriz/rethinkdb | external/v8_3.30.33.16/build/gyp/test/mac/gyptest-xctest.py | 221 | 1196 | #!/usr/bin/env python
# Copyright (c) 2013 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Verifies that xctest targets are correctly configured.
"""
import TestGyp
import sys
if sys.platform == 'darwin':
test = TestGyp.TestGyp(formats=['xcode'])
# Ignore this test if Xcode 5 is not installed
import subprocess
job = subprocess.Popen(['xcodebuild', '-version'],
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT)
out, err = job.communicate()
if job.returncode != 0:
raise Exception('Error %d running xcodebuild' % job.returncode)
xcode_version, build_number = out.splitlines()
# Convert the version string from 'Xcode 5.0' to ['5','0'].
xcode_version = xcode_version.split()[-1].split('.')
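# Lexicographic list comparison: anything below ['5'] means Xcode 4.x or older, where xctest targets are unsupported, so the test is skipped as passing.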
if xcode_version < ['5']:
test.pass_test()
CHDIR = 'xctest'
test.run_gyp('test.gyp', chdir=CHDIR)
test.build('test.gyp', chdir=CHDIR, arguments=['-scheme', 'classes', 'test'])
test.built_file_must_match('tests.xctest/Contents/Resources/resource.txt',
'foo\n', chdir=CHDIR)
test.pass_test()
| agpl-3.0 |
jackru/pybrain | examples/optimization/multiobjective/constnsga2jpq.py | 25 | 2336 | #!/usr/bin/env python
from __future__ import print_function
""" An illustration of using the NSGA-II multi-objective optimization algorithm
on Constrained Multi-Objective Optimization benchmark function. """
__author__ = 'Jean Pierre Queau, [email protected]'
from pybrain.optimization import ConstMultiObjectiveGA
from pybrain.rl.environments.functions.multiobjective import ConstDeb,ConstSrn, \
ConstOsy,ConstTnk,ConstBnh
import pylab
from scipy import zeros, array
# The Deb function
#f = ConstDeb()
# The Srinivas & Deb function
#f = ConstSrn()
# The Osyczka & Kundu function
#f = ConstOsy()
# The Tanaka function
#f = ConstTnk()
# The Binh & Korn function
f = ConstBnh()
# start at the origin
x0 = zeros(f.indim)
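# Override the origin with the lower corner of the search domain as the actual start point (the line above is superseded).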
x0 = array([min_ for min_, max_ in f.xbound])
# run the optimization for a maximum of 50 generations
n = ConstMultiObjectiveGA(f, x0, storeAllEvaluations = True, populationSize = 100, eliteProportion = 1.0,
topProportion = 1.0, mutationProb = 1.0, mutationStdDev = 0.3, storeAllPopulations = True, allowEquality = False)
print('Start Learning')
n.learn(50)
print('End Learning')
# plotting the results (blue = feasible evaluated points, red = infeasible points, green = resulting pareto front)
print('Plotting the Results')
print('All Evaluations.... take some time')
for x in n._allEvaluations:
if x[1]:
pylab.plot([x[0][0]], [x[0][1]], 'b.')
else:
pylab.plot([x[0][0]], [x[0][1]], 'r.')
for x in n.bestEvaluation: pylab.plot([x[0][0]], [x[0][1]], 'go')
pylab.show()
print('Pareto Front')
for x in n.bestEvaluation: pylab.plot([x[0][0]], [x[0][1]], 'go')
pylab.show()
print('===========')
print('= Results =')
print('===========')
'''
i=0
for gen in n._allGenerations:
print('Generation: ', i)
for j in range(len(gen[1])):
print(gen[1].keys()[j], gen[1].values()[j])
i+=1
'''
print('Population size ',n.populationSize)
print('Elitism Proportion ',n.eliteProportion)
print('Mutation Probability ',n.mutationProb)
print('Mutation Std Deviation ',n.mutationStdDev)
print('Objective Evaluation number ',n.numEvaluations)
print('last generation Length of bestEvaluation ',len(n.bestEvaluation))
print('Best Evaluable : Best Evaluation')
for i in range(len(n.bestEvaluation)):
assert len(n.bestEvaluation) == len(n.bestEvaluable)
print(n.bestEvaluable[i],':',n.bestEvaluation[i]) | bsd-3-clause |
mrkm4ntr/incubator-airflow | tests/api_connexion/endpoints/test_dag_run_endpoint.py | 7 | 36001 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import unittest
from datetime import timedelta
from parameterized import parameterized
from airflow.api_connexion.exceptions import EXCEPTIONS_LINK_MAP
from airflow.models import DagModel, DagRun
from airflow.security import permissions
from airflow.utils import timezone
from airflow.utils.session import create_session, provide_session
from airflow.utils.types import DagRunType
from airflow.www import app
from tests.test_utils.api_connexion_utils import assert_401, create_user, delete_user
from tests.test_utils.config import conf_vars
from tests.test_utils.db import clear_db_dags, clear_db_runs
class TestDagRunEndpoint(unittest.TestCase):
@classmethod
def setUpClass(cls) -> None:
super().setUpClass()
with conf_vars({("api", "auth_backend"): "tests.test_utils.remote_user_api_auth_backend"}):
cls.app = app.create_app(testing=True) # type:ignore
create_user(
cls.app, # type: ignore
username="test",
role_name="Test",
permissions=[
(permissions.ACTION_CAN_READ, permissions.RESOURCE_DAG),
(permissions.ACTION_CAN_CREATE, permissions.RESOURCE_DAG_RUN),
(permissions.ACTION_CAN_READ, permissions.RESOURCE_DAG_RUN),
(permissions.ACTION_CAN_EDIT, permissions.RESOURCE_DAG_RUN),
(permissions.ACTION_CAN_DELETE, permissions.RESOURCE_DAG_RUN),
],
)
create_user(
cls.app, # type: ignore
username="test_granular_permissions",
role_name="TestGranularDag",
permissions=[(permissions.ACTION_CAN_READ, permissions.RESOURCE_DAG_RUN)],
)
cls.app.appbuilder.sm.sync_perm_for_dag( # type: ignore # pylint: disable=no-member
"TEST_DAG_ID",
access_control={'TestGranularDag': [permissions.ACTION_CAN_EDIT, permissions.ACTION_CAN_READ]},
)
create_user(cls.app, username="test_no_permissions", role_name="TestNoPermissions") # type: ignore
@classmethod
def tearDownClass(cls) -> None:
delete_user(cls.app, username="test") # type: ignore
delete_user(cls.app, username="test_granular_permissions") # type: ignore
delete_user(cls.app, username="test_no_permissions") # type: ignore
def setUp(self) -> None:
self.client = self.app.test_client() # type:ignore
self.default_time = "2020-06-11T18:00:00+00:00"
self.default_time_2 = "2020-06-12T18:00:00+00:00"
clear_db_runs()
clear_db_dags()
def tearDown(self) -> None:
clear_db_runs()
# clear_db_dags()
def _create_test_dag_run(self, state='running', extra_dag=False, commit=True):
dag_runs = []
dags = [DagModel(dag_id="TEST_DAG_ID")]
dagrun_model_1 = DagRun(
dag_id="TEST_DAG_ID",
run_id="TEST_DAG_RUN_ID_1",
run_type=DagRunType.MANUAL,
execution_date=timezone.parse(self.default_time),
start_date=timezone.parse(self.default_time),
external_trigger=True,
state=state,
)
dag_runs.append(dagrun_model_1)
dagrun_model_2 = DagRun(
dag_id="TEST_DAG_ID",
run_id="TEST_DAG_RUN_ID_2",
run_type=DagRunType.MANUAL,
execution_date=timezone.parse(self.default_time_2),
start_date=timezone.parse(self.default_time),
external_trigger=True,
)
dag_runs.append(dagrun_model_2)
if extra_dag:
for i in range(3, 5):
dags.append(DagModel(dag_id='TEST_DAG_ID_' + str(i)))
dag_runs.append(
DagRun(
dag_id='TEST_DAG_ID_' + str(i),
run_id='TEST_DAG_RUN_ID_' + str(i),
run_type=DagRunType.MANUAL,
execution_date=timezone.parse(self.default_time_2),
start_date=timezone.parse(self.default_time),
external_trigger=True,
)
)
if commit:
with create_session() as session:
session.add_all(dag_runs)
session.add_all(dags)
return dag_runs
class TestDeleteDagRun(TestDagRunEndpoint):
@provide_session
def test_should_respond_204(self, session):
session.add_all(self._create_test_dag_run())
session.commit()
response = self.client.delete(
"api/v1/dags/TEST_DAG_ID/dagRuns/TEST_DAG_RUN_ID_1", environ_overrides={'REMOTE_USER': "test"}
)
self.assertEqual(response.status_code, 204)
# Check if the Dag Run is deleted from the database
response = self.client.get(
"api/v1/dags/TEST_DAG_ID/dagRuns/TEST_DAG_RUN_ID_1", environ_overrides={'REMOTE_USER': "test"}
)
self.assertEqual(response.status_code, 404)
def test_should_respond_404(self):
response = self.client.delete(
"api/v1/dags/INVALID_DAG_RUN/dagRuns/INVALID_DAG_RUN", environ_overrides={'REMOTE_USER': "test"}
)
self.assertEqual(response.status_code, 404)
self.assertEqual(
response.json,
{
"detail": "DAGRun with DAG ID: 'INVALID_DAG_RUN' and DagRun ID: 'INVALID_DAG_RUN' not found",
"status": 404,
"title": "Not Found",
"type": EXCEPTIONS_LINK_MAP[404],
},
)
@provide_session
def test_should_raises_401_unauthenticated(self, session):
session.add_all(self._create_test_dag_run())
session.commit()
response = self.client.delete(
"api/v1/dags/TEST_DAG_ID/dagRuns/TEST_DAG_RUN_ID_1",
)
assert_401(response)
def test_should_raise_403_forbidden(self):
response = self.client.get(
"api/v1/dags/TEST_DAG_ID/dagRuns/TEST_DAG_RUN_ID",
environ_overrides={'REMOTE_USER': "test_no_permissions"},
)
assert response.status_code == 403
class TestGetDagRun(TestDagRunEndpoint):
@provide_session
def test_should_respond_200(self, session):
dagrun_model = DagRun(
dag_id="TEST_DAG_ID",
run_id="TEST_DAG_RUN_ID",
run_type=DagRunType.MANUAL,
execution_date=timezone.parse(self.default_time),
start_date=timezone.parse(self.default_time),
external_trigger=True,
)
session.add(dagrun_model)
session.commit()
result = session.query(DagRun).all()
assert len(result) == 1
response = self.client.get(
"api/v1/dags/TEST_DAG_ID/dagRuns/TEST_DAG_RUN_ID", environ_overrides={'REMOTE_USER': "test"}
)
assert response.status_code == 200
expected_response = {
'dag_id': 'TEST_DAG_ID',
'dag_run_id': 'TEST_DAG_RUN_ID',
'end_date': None,
'state': 'running',
'execution_date': self.default_time,
'external_trigger': True,
'start_date': self.default_time,
'conf': {},
}
assert response.json == expected_response
def test_should_respond_404(self):
response = self.client.get(
"api/v1/dags/invalid-id/dagRuns/invalid-id", environ_overrides={'REMOTE_USER': "test"}
)
assert response.status_code == 404
expected_resp = {
'detail': "DAGRun with DAG ID: 'invalid-id' and DagRun ID: 'invalid-id' not found",
'status': 404,
'title': 'DAGRun not found',
'type': EXCEPTIONS_LINK_MAP[404],
}
assert expected_resp == response.json
@provide_session
def test_should_raises_401_unauthenticated(self, session):
dagrun_model = DagRun(
dag_id="TEST_DAG_ID",
run_id="TEST_DAG_RUN_ID",
run_type=DagRunType.MANUAL,
execution_date=timezone.parse(self.default_time),
start_date=timezone.parse(self.default_time),
external_trigger=True,
)
session.add(dagrun_model)
session.commit()
response = self.client.get("api/v1/dags/TEST_DAG_ID/dagRuns/TEST_DAG_RUN_ID")
assert_401(response)
class TestGetDagRuns(TestDagRunEndpoint):
@provide_session
def test_should_respond_200(self, session):
self._create_test_dag_run()
result = session.query(DagRun).all()
assert len(result) == 2
response = self.client.get(
"api/v1/dags/TEST_DAG_ID/dagRuns", environ_overrides={'REMOTE_USER': "test"}
)
assert response.status_code == 200
assert response.json == {
"dag_runs": [
{
'dag_id': 'TEST_DAG_ID',
'dag_run_id': 'TEST_DAG_RUN_ID_1',
'end_date': None,
'state': 'running',
'execution_date': self.default_time,
'external_trigger': True,
'start_date': self.default_time,
'conf': {},
},
{
'dag_id': 'TEST_DAG_ID',
'dag_run_id': 'TEST_DAG_RUN_ID_2',
'end_date': None,
'state': 'running',
'execution_date': self.default_time_2,
'external_trigger': True,
'start_date': self.default_time,
'conf': {},
},
],
"total_entries": 2,
}
def test_should_return_all_with_tilde_as_dag_id_and_all_dag_permissions(self):
self._create_test_dag_run(extra_dag=True)
expected_dag_run_ids = ['TEST_DAG_ID', 'TEST_DAG_ID', "TEST_DAG_ID_3", "TEST_DAG_ID_4"]
response = self.client.get("api/v1/dags/~/dagRuns", environ_overrides={'REMOTE_USER': "test"})
assert response.status_code == 200
dag_run_ids = [dag_run["dag_id"] for dag_run in response.json["dag_runs"]]
assert dag_run_ids == expected_dag_run_ids
def test_should_return_accessible_with_tilde_as_dag_id_and_dag_level_permissions(self):
self._create_test_dag_run(extra_dag=True)
expected_dag_run_ids = ['TEST_DAG_ID', 'TEST_DAG_ID']
response = self.client.get(
"api/v1/dags/~/dagRuns", environ_overrides={'REMOTE_USER': "test_granular_permissions"}
)
assert response.status_code == 200
dag_run_ids = [dag_run["dag_id"] for dag_run in response.json["dag_runs"]]
assert dag_run_ids == expected_dag_run_ids
def test_should_raises_401_unauthenticated(self):
self._create_test_dag_run()
response = self.client.get("api/v1/dags/TEST_DAG_ID/dagRuns")
assert_401(response)
class TestGetDagRunsPagination(TestDagRunEndpoint):
@parameterized.expand(
[
("api/v1/dags/TEST_DAG_ID/dagRuns?limit=1", ["TEST_DAG_RUN_ID1"]),
(
"api/v1/dags/TEST_DAG_ID/dagRuns?limit=2",
["TEST_DAG_RUN_ID1", "TEST_DAG_RUN_ID2"],
),
(
"api/v1/dags/TEST_DAG_ID/dagRuns?offset=5",
[
"TEST_DAG_RUN_ID6",
"TEST_DAG_RUN_ID7",
"TEST_DAG_RUN_ID8",
"TEST_DAG_RUN_ID9",
"TEST_DAG_RUN_ID10",
],
),
(
"api/v1/dags/TEST_DAG_ID/dagRuns?offset=0",
[
"TEST_DAG_RUN_ID1",
"TEST_DAG_RUN_ID2",
"TEST_DAG_RUN_ID3",
"TEST_DAG_RUN_ID4",
"TEST_DAG_RUN_ID5",
"TEST_DAG_RUN_ID6",
"TEST_DAG_RUN_ID7",
"TEST_DAG_RUN_ID8",
"TEST_DAG_RUN_ID9",
"TEST_DAG_RUN_ID10",
],
),
("api/v1/dags/TEST_DAG_ID/dagRuns?limit=1&offset=5", ["TEST_DAG_RUN_ID6"]),
("api/v1/dags/TEST_DAG_ID/dagRuns?limit=1&offset=1", ["TEST_DAG_RUN_ID2"]),
(
"api/v1/dags/TEST_DAG_ID/dagRuns?limit=2&offset=2",
["TEST_DAG_RUN_ID3", "TEST_DAG_RUN_ID4"],
),
]
)
def test_handle_limit_and_offset(self, url, expected_dag_run_ids):
self._create_dag_runs(10)
response = self.client.get(url, environ_overrides={'REMOTE_USER': "test"})
assert response.status_code == 200
assert response.json["total_entries"] == 10
dag_run_ids = [dag_run["dag_run_id"] for dag_run in response.json["dag_runs"]]
assert dag_run_ids == expected_dag_run_ids
def test_should_respect_page_size_limit(self):
self._create_dag_runs(200)
response = self.client.get(
"api/v1/dags/TEST_DAG_ID/dagRuns", environ_overrides={'REMOTE_USER': "test"}
)
assert response.status_code == 200
assert response.json["total_entries"] == 200
assert len(response.json["dag_runs"]) == 100 # default is 100
@conf_vars({("api", "maximum_page_limit"): "150"})
def test_should_return_conf_max_if_req_max_above_conf(self):
self._create_dag_runs(200)
response = self.client.get(
"api/v1/dags/TEST_DAG_ID/dagRuns?limit=180", environ_overrides={'REMOTE_USER': "test"}
)
assert response.status_code == 200
self.assertEqual(len(response.json["dag_runs"]), 150)
def _create_dag_runs(self, count):
dag_runs = [
DagRun(
dag_id="TEST_DAG_ID",
run_id="TEST_DAG_RUN_ID" + str(i),
run_type=DagRunType.MANUAL,
execution_date=timezone.parse(self.default_time) + timedelta(minutes=i),
start_date=timezone.parse(self.default_time),
external_trigger=True,
)
for i in range(1, count + 1)
]
dag = DagModel(dag_id="TEST_DAG_ID")
with create_session() as session:
session.add_all(dag_runs)
session.add(dag)
class TestGetDagRunsPaginationFilters(TestDagRunEndpoint):
@parameterized.expand(
[
(
"api/v1/dags/TEST_DAG_ID/dagRuns?start_date_gte=2020-06-18T18:00:00+00:00",
["TEST_START_EXEC_DAY_18", "TEST_START_EXEC_DAY_19"],
),
(
"api/v1/dags/TEST_DAG_ID/dagRuns?start_date_lte=2020-06-11T18:00:00+00:00",
["TEST_START_EXEC_DAY_10", "TEST_START_EXEC_DAY_11"],
),
(
"api/v1/dags/TEST_DAG_ID/dagRuns?start_date_lte= 2020-06-15T18:00:00+00:00"
"&start_date_gte=2020-06-12T18:00:00Z",
[
"TEST_START_EXEC_DAY_12",
"TEST_START_EXEC_DAY_13",
"TEST_START_EXEC_DAY_14",
"TEST_START_EXEC_DAY_15",
],
),
(
"api/v1/dags/TEST_DAG_ID/dagRuns?execution_date_lte=2020-06-13T18:00:00+00:00",
[
"TEST_START_EXEC_DAY_10",
"TEST_START_EXEC_DAY_11",
"TEST_START_EXEC_DAY_12",
"TEST_START_EXEC_DAY_13",
],
),
(
"api/v1/dags/TEST_DAG_ID/dagRuns?execution_date_gte=2020-06-16T18:00:00+00:00",
[
"TEST_START_EXEC_DAY_16",
"TEST_START_EXEC_DAY_17",
"TEST_START_EXEC_DAY_18",
"TEST_START_EXEC_DAY_19",
],
),
]
)
@provide_session
def test_date_filters_gte_and_lte(self, url, expected_dag_run_ids, session):
dagrun_models = self._create_dag_runs()
session.add_all(dagrun_models)
session.commit()
response = self.client.get(url, environ_overrides={'REMOTE_USER': "test"})
assert response.status_code == 200
assert response.json["total_entries"] == len(expected_dag_run_ids)
dag_run_ids = [dag_run["dag_run_id"] for dag_run in response.json["dag_runs"]]
assert dag_run_ids == expected_dag_run_ids
def _create_dag_runs(self):
dates = [
"2020-06-10T18:00:00+00:00",
"2020-06-11T18:00:00+00:00",
"2020-06-12T18:00:00+00:00",
"2020-06-13T18:00:00+00:00",
"2020-06-14T18:00:00+00:00",
"2020-06-15T18:00:00Z",
"2020-06-16T18:00:00Z",
"2020-06-17T18:00:00Z",
"2020-06-18T18:00:00Z",
"2020-06-19T18:00:00Z",
]
return [
DagRun(
dag_id="TEST_DAG_ID",
run_id="TEST_START_EXEC_DAY_1" + str(i),
run_type=DagRunType.MANUAL,
execution_date=timezone.parse(dates[i]),
start_date=timezone.parse(dates[i]),
external_trigger=True,
state="success",
)
for i in range(len(dates))
]
class TestGetDagRunsEndDateFilters(TestDagRunEndpoint):
@parameterized.expand(
[
(
f"api/v1/dags/TEST_DAG_ID/dagRuns?end_date_gte="
f"{(timezone.utcnow() + timedelta(days=1)).isoformat()}",
[],
),
(
f"api/v1/dags/TEST_DAG_ID/dagRuns?end_date_lte="
f"{(timezone.utcnow() + timedelta(days=1)).isoformat()}",
["TEST_DAG_RUN_ID_1"],
),
]
)
def test_end_date_gte_lte(self, url, expected_dag_run_ids):
self._create_test_dag_run('success') # a run in the success state gets its end_date set to now
response = self.client.get(url, environ_overrides={'REMOTE_USER': "test"})
assert response.status_code == 200
assert response.json["total_entries"] == len(expected_dag_run_ids)
dag_run_ids = [dag_run["dag_run_id"] for dag_run in response.json["dag_runs"] if dag_run]
assert dag_run_ids == expected_dag_run_ids
class TestGetDagRunBatch(TestDagRunEndpoint):
def test_should_respond_200(self):
self._create_test_dag_run()
response = self.client.post(
"api/v1/dags/~/dagRuns/list",
json={"dag_ids": ["TEST_DAG_ID"]},
environ_overrides={'REMOTE_USER': "test"},
)
assert response.status_code == 200
assert response.json == {
"dag_runs": [
{
'dag_id': 'TEST_DAG_ID',
'dag_run_id': 'TEST_DAG_RUN_ID_1',
'end_date': None,
'state': 'running',
'execution_date': self.default_time,
'external_trigger': True,
'start_date': self.default_time,
'conf': {},
},
{
'dag_id': 'TEST_DAG_ID',
'dag_run_id': 'TEST_DAG_RUN_ID_2',
'end_date': None,
'state': 'running',
'execution_date': self.default_time_2,
'external_trigger': True,
'start_date': self.default_time,
'conf': {},
},
],
"total_entries": 2,
}
def test_should_return_accessible_with_tilde_as_dag_id_and_dag_level_permissions(self):
self._create_test_dag_run(extra_dag=True)
response = self.client.post(
"api/v1/dags/~/dagRuns/list",
json={"dag_ids": []},
environ_overrides={'REMOTE_USER': "test_granular_permissions"},
)
assert response.status_code == 200
assert response.json == {
"dag_runs": [
{
'dag_id': 'TEST_DAG_ID',
'dag_run_id': 'TEST_DAG_RUN_ID_1',
'end_date': None,
'state': 'running',
'execution_date': self.default_time,
'external_trigger': True,
'start_date': self.default_time,
'conf': {},
},
{
'dag_id': 'TEST_DAG_ID',
'dag_run_id': 'TEST_DAG_RUN_ID_2',
'end_date': None,
'state': 'running',
'execution_date': self.default_time_2,
'external_trigger': True,
'start_date': self.default_time,
'conf': {},
},
],
"total_entries": 2,
}
@parameterized.expand(
[
(
{"dag_ids": ["TEST_DAG_ID"], "page_offset": -1},
"-1 is less than the minimum of 0 - 'page_offset'",
),
({"dag_ids": ["TEST_DAG_ID"], "page_limit": 0}, "0 is less than the minimum of 1 - 'page_limit'"),
({"dag_ids": "TEST_DAG_ID"}, "'TEST_DAG_ID' is not of type 'array' - 'dag_ids'"),
({"start_date_gte": "2020-06-12T18"}, "{'start_date_gte': ['Not a valid datetime.']}"),
]
)
def test_payload_validation(self, payload, error):
self._create_test_dag_run()
response = self.client.post(
"api/v1/dags/~/dagRuns/list", json=payload, environ_overrides={'REMOTE_USER': "test"}
)
assert response.status_code == 400
assert error == response.json.get("detail")
def test_should_raises_401_unauthenticated(self):
self._create_test_dag_run()
response = self.client.post("api/v1/dags/~/dagRuns/list", json={"dag_ids": ["TEST_DAG_ID"]})
assert_401(response)
class TestGetDagRunBatchPagination(TestDagRunEndpoint):
@parameterized.expand(
[
({"page_limit": 1}, ["TEST_DAG_RUN_ID1"]),
({"page_limit": 2}, ["TEST_DAG_RUN_ID1", "TEST_DAG_RUN_ID2"]),
(
{"page_offset": 5},
[
"TEST_DAG_RUN_ID6",
"TEST_DAG_RUN_ID7",
"TEST_DAG_RUN_ID8",
"TEST_DAG_RUN_ID9",
"TEST_DAG_RUN_ID10",
],
),
(
{"page_offset": 0},
[
"TEST_DAG_RUN_ID1",
"TEST_DAG_RUN_ID2",
"TEST_DAG_RUN_ID3",
"TEST_DAG_RUN_ID4",
"TEST_DAG_RUN_ID5",
"TEST_DAG_RUN_ID6",
"TEST_DAG_RUN_ID7",
"TEST_DAG_RUN_ID8",
"TEST_DAG_RUN_ID9",
"TEST_DAG_RUN_ID10",
],
),
({"page_offset": 5, "page_limit": 1}, ["TEST_DAG_RUN_ID6"]),
({"page_offset": 1, "page_limit": 1}, ["TEST_DAG_RUN_ID2"]),
(
{"page_offset": 2, "page_limit": 2},
["TEST_DAG_RUN_ID3", "TEST_DAG_RUN_ID4"],
),
]
)
def test_handle_limit_and_offset(self, payload, expected_dag_run_ids):
self._create_dag_runs(10)
response = self.client.post(
"api/v1/dags/~/dagRuns/list", json=payload, environ_overrides={'REMOTE_USER': "test"}
)
assert response.status_code == 200
assert response.json["total_entries"] == 10
dag_run_ids = [dag_run["dag_run_id"] for dag_run in response.json["dag_runs"]]
assert dag_run_ids == expected_dag_run_ids
def test_should_respect_page_size_limit(self):
self._create_dag_runs(200)
response = self.client.post(
"api/v1/dags/~/dagRuns/list", json={}, environ_overrides={'REMOTE_USER': "test"}
)
assert response.status_code == 200
assert response.json["total_entries"] == 200
assert len(response.json["dag_runs"]) == 100 # default is 100
def _create_dag_runs(self, count):
dag_runs = [
DagRun(
dag_id="TEST_DAG_ID",
run_id="TEST_DAG_RUN_ID" + str(i),
run_type=DagRunType.MANUAL,
execution_date=timezone.parse(self.default_time) + timedelta(minutes=i),
start_date=timezone.parse(self.default_time),
external_trigger=True,
)
for i in range(1, count + 1)
]
dag = DagModel(dag_id="TEST_DAG_ID")
with create_session() as session:
session.add_all(dag_runs)
session.add(dag)
class TestGetDagRunBatchDateFilters(TestDagRunEndpoint):
@parameterized.expand(
[
(
{"start_date_gte": "2020-06-18T18:00:00+00:00"},
["TEST_START_EXEC_DAY_18", "TEST_START_EXEC_DAY_19"],
),
(
{"start_date_lte": "2020-06-11T18:00:00+00:00"},
["TEST_START_EXEC_DAY_10", "TEST_START_EXEC_DAY_11"],
),
(
{"start_date_lte": "2020-06-15T18:00:00+00:00", "start_date_gte": "2020-06-12T18:00:00Z"},
[
"TEST_START_EXEC_DAY_12",
"TEST_START_EXEC_DAY_13",
"TEST_START_EXEC_DAY_14",
"TEST_START_EXEC_DAY_15",
],
),
(
{"execution_date_lte": "2020-06-13T18:00:00+00:00"},
[
"TEST_START_EXEC_DAY_10",
"TEST_START_EXEC_DAY_11",
"TEST_START_EXEC_DAY_12",
"TEST_START_EXEC_DAY_13",
],
),
(
{"execution_date_gte": "2020-06-16T18:00:00+00:00"},
[
"TEST_START_EXEC_DAY_16",
"TEST_START_EXEC_DAY_17",
"TEST_START_EXEC_DAY_18",
"TEST_START_EXEC_DAY_19",
],
),
]
)
def test_date_filters_gte_and_lte(self, payload, expected_dag_run_ids):
self._create_dag_runs()
response = self.client.post(
"api/v1/dags/~/dagRuns/list", json=payload, environ_overrides={'REMOTE_USER': "test"}
)
assert response.status_code == 200
assert response.json["total_entries"] == len(expected_dag_run_ids)
dag_run_ids = [dag_run["dag_run_id"] for dag_run in response.json["dag_runs"]]
assert dag_run_ids == expected_dag_run_ids
def _create_dag_runs(self):
dates = [
'2020-06-10T18:00:00+00:00',
'2020-06-11T18:00:00+00:00',
'2020-06-12T18:00:00+00:00',
'2020-06-13T18:00:00+00:00',
'2020-06-14T18:00:00+00:00',
'2020-06-15T18:00:00Z',
'2020-06-16T18:00:00Z',
'2020-06-17T18:00:00Z',
'2020-06-18T18:00:00Z',
'2020-06-19T18:00:00Z',
]
dag = DagModel(dag_id="TEST_DAG_ID")
dag_runs = [
DagRun(
dag_id="TEST_DAG_ID",
run_id="TEST_START_EXEC_DAY_1" + str(i),
run_type=DagRunType.MANUAL,
execution_date=timezone.parse(dates[i]),
start_date=timezone.parse(dates[i]),
external_trigger=True,
state='success',
)
for i in range(len(dates))
]
with create_session() as session:
session.add_all(dag_runs)
session.add(dag)
return dag_runs
@parameterized.expand(
[
({"execution_date_gte": '2020-11-09T16:25:56.939143'}, 'Naive datetime is disallowed'),
(
{"start_date_gte": "2020-06-18T16:25:56.939143"},
'Naive datetime is disallowed',
),
(
{"start_date_lte": "2020-06-18T18:00:00.564434"},
'Naive datetime is disallowed',
),
(
{"start_date_lte": "2020-06-15T18:00:00.653434", "start_date_gte": "2020-06-12T18:00.343534"},
'Naive datetime is disallowed',
),
(
{"execution_date_lte": "2020-06-13T18:00:00.353454"},
'Naive datetime is disallowed',
),
({"execution_date_gte": "2020-06-16T18:00:00.676443"}, 'Naive datetime is disallowed'),
]
)
def test_naive_date_filters_raises_400(self, payload, expected_response):
self._create_dag_runs()
response = self.client.post(
"api/v1/dags/~/dagRuns/list", json=payload, environ_overrides={'REMOTE_USER': "test"}
)
assert response.status_code == 400
self.assertEqual(response.json['detail'], expected_response)
@parameterized.expand(
[
(
{"end_date_gte": f"{(timezone.utcnow() + timedelta(days=1)).isoformat()}"},
[],
),
(
{"end_date_lte": f"{(timezone.utcnow() + timedelta(days=1)).isoformat()}"},
["TEST_DAG_RUN_ID_1"],
),
]
)
def test_end_date_gte_lte(self, payload, expected_dag_run_ids):
self._create_test_dag_run('success') # a run in the success state gets its end_date set to now
response = self.client.post(
"api/v1/dags/~/dagRuns/list", json=payload, environ_overrides={'REMOTE_USER': "test"}
)
assert response.status_code == 200
assert response.json["total_entries"] == len(expected_dag_run_ids)
dag_run_ids = [dag_run["dag_run_id"] for dag_run in response.json["dag_runs"] if dag_run]
assert dag_run_ids == expected_dag_run_ids
class TestPostDagRun(TestDagRunEndpoint):
@parameterized.expand(
[
(
"All fields present",
{
"dag_run_id": "TEST_DAG_RUN",
"execution_date": "2020-06-11T18:00:00+00:00",
},
),
("dag_run_id missing", {"execution_date": "2020-06-11T18:00:00+00:00"}),
("dag_run_id and execution_date missing", {}),
]
)
@provide_session
def test_should_respond_200(self, name, request_json, session):
del name
dag_instance = DagModel(dag_id="TEST_DAG_ID")
session.add(dag_instance)
session.commit()
response = self.client.post(
"api/v1/dags/TEST_DAG_ID/dagRuns", json=request_json, environ_overrides={'REMOTE_USER': "test"}
)
self.assertEqual(response.status_code, 200)
self.assertEqual(
{
"conf": {},
"dag_id": "TEST_DAG_ID",
"dag_run_id": response.json["dag_run_id"],
"end_date": None,
"execution_date": response.json["execution_date"],
"external_trigger": True,
"start_date": response.json["start_date"],
"state": "running",
},
response.json,
)
@parameterized.expand(
[
({'execution_date': "2020-11-10T08:25:56.939143"}, 'Naive datetime is disallowed'),
({'execution_date': "2020-11-10T08:25:56P"}, "{'execution_date': ['Not a valid datetime.']}"),
]
)
@provide_session
def test_should_response_400_for_naive_datetime_and_bad_datetime(self, data, expected, session):
dag_instance = DagModel(dag_id="TEST_DAG_ID")
session.add(dag_instance)
session.commit()
response = self.client.post(
"api/v1/dags/TEST_DAG_ID/dagRuns", json=data, environ_overrides={'REMOTE_USER': "test"}
)
self.assertEqual(response.status_code, 400)
self.assertEqual(response.json['detail'], expected)
def test_response_404(self):
response = self.client.post(
"api/v1/dags/TEST_DAG_ID/dagRuns",
json={"dag_run_id": "TEST_DAG_RUN", "execution_date": self.default_time},
environ_overrides={'REMOTE_USER': "test"},
)
self.assertEqual(response.status_code, 404)
self.assertEqual(
{
"detail": "DAG with dag_id: 'TEST_DAG_ID' not found",
"status": 404,
"title": "DAG not found",
"type": EXCEPTIONS_LINK_MAP[404],
},
response.json,
)
@parameterized.expand(
[
(
"start_date in request json",
"api/v1/dags/TEST_DAG_ID/dagRuns",
{
"start_date": "2020-06-11T18:00:00+00:00",
"execution_date": "2020-06-12T18:00:00+00:00",
},
{
"detail": "Property is read-only - 'start_date'",
"status": 400,
"title": "Bad Request",
"type": EXCEPTIONS_LINK_MAP[400],
},
),
(
"state in request json",
"api/v1/dags/TEST_DAG_ID/dagRuns",
{"state": "failed", "execution_date": "2020-06-12T18:00:00+00:00"},
{
"detail": "Property is read-only - 'state'",
"status": 400,
"title": "Bad Request",
"type": EXCEPTIONS_LINK_MAP[400],
},
),
]
)
@provide_session
def test_response_400(self, name, url, request_json, expected_response, session):
del name
dag_instance = DagModel(dag_id="TEST_DAG_ID")
session.add(dag_instance)
session.commit()
response = self.client.post(url, json=request_json, environ_overrides={'REMOTE_USER': "test"})
self.assertEqual(response.status_code, 400, response.data)
self.assertEqual(expected_response, response.json)
def test_response_409(self):
self._create_test_dag_run()
response = self.client.post(
"api/v1/dags/TEST_DAG_ID/dagRuns",
json={
"dag_run_id": "TEST_DAG_RUN_ID_1",
"execution_date": self.default_time,
},
environ_overrides={'REMOTE_USER': "test"},
)
self.assertEqual(response.status_code, 409, response.data)
self.assertEqual(
response.json,
{
"detail": "DAGRun with DAG ID: 'TEST_DAG_ID' and "
"DAGRun ID: 'TEST_DAG_RUN_ID_1' already exists",
"status": 409,
"title": "Conflict",
"type": EXCEPTIONS_LINK_MAP[409],
},
)
def test_should_raises_401_unauthenticated(self):
response = self.client.post(
"api/v1/dags/TEST_DAG_ID/dagRuns",
json={
"dag_run_id": "TEST_DAG_RUN_ID_1",
"execution_date": self.default_time,
},
)
assert_401(response)
| apache-2.0 |
n4xh4ck5/RastLeak | RastLeak/old/RastLeak_1_1.py | 2 | 7141 | import requests
import wget
import json
from urlparse import urlparse
from bs4 import BeautifulSoup
import optparse
#Analyze metadata pdf
import PyPDF2
from PyPDF2 import PdfFileReader
#Analyze metadata docx
import docx
import datetime
#Parser arguments
import argparse
from argparse import RawTextHelpFormatter
#define vars
dork=["site:","-site:","filetype:","intitle:","intext:"]
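# Only dork[0] ("site:") and dork[2] ("filetype:") are used when building the Bing query below.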
urls = []
urls_clean = []
urls_final =[]
delete_bing=["microsoft","msn","bing"]
option = 0
#********************************************************#
#Define and design the dork
def DesignDork( num,file_ext):
iteration=0
initial=1
count_bing=9
try:
while (iteration < num):
#WAITING A DORK IN BING
iteration = iteration +1
if initial==1:
print "\nSearching possible leak information...\n"
initial = 0
#First search in Bing
SearchBing = "https://www.bing.com/search?q="+dork[0]+target+" ("+dork[2]+"pdf+OR+"+dork[2]+"doc)&go=Buscar"
else:
#Bring the next Bing results - 50 in each page
SearchBing=SearchBing + "&first="+str(count_bing)+"&FORM=PORE"
count_bing=count_bing+50
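# Bing's "first" parameter is the 1-based index of the first result to return; stepping it by 50 pages through the results.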
SendRequest(SearchBing)
except:
pass
#********************************************************#
#Doing the request to search
def SendRequest(dork):
try:
#Requests
response=requests.get(dork,allow_redirects=True)
except:
return # bail out: a failed request would leave 'response' undefined below
content = response.text
#PARSER HTML
#normalize a called with parameters
parser_html(file_ext,content)
#********************************************************#
#Definition and treatment of the parameters
def parser_html(type,content):
i = 0
soup = BeautifulSoup(content, 'html.parser')
for link in soup.find_all('a'):
try:
if (urlparse(link.get('href'))!='' and urlparse(link.get('href'))[1].strip()!=''):
if type == 1: #option 1: collect only the domains where the files are found
urls.append(urlparse(link.get('href'))[1]) #domain
else: #option 2: collect the full links to the office files themselves
urls.append(link.get('href'))
except Exception as e:
#print(e)
pass
try:
#Delete duplicates
[urls_clean.append(i) for i in urls if not i in urls_clean]
except:
pass
try:
#Drop results from Bing/Microsoft/MSN domains, which do not belong to the target
for value in urls_clean:
if (value.find(delete_bing[0]) == -1):
if (value.find(delete_bing[1]) == -1):
if (value.find(delete_bing[2]) == -1):
urls_final.append(value)
except:
pass
####### FUNCTION EXPORTRESULTS ######
def ExportResults(data):
with open ('output.json','w') as f:
json.dump(data,f)
####### FUNCTION AnalyzeMetadata pdf ######
def Analyze_Metadata_pdf(filename):
pdfFile = PdfFileReader(file(filename, 'rb'))
metadata = pdfFile.getDocumentInfo()
print ' - Document: ' + str(filename)
for meta in metadata:
print ' - ' + meta + ':' + metadata[meta]
####### FUNCTION AnalyzeMetadata doc ######
def Analyze_Metadata_doc(fileName):
#Open file
docxFile = docx.Document(file(fileName,'rb'))
#Get the structure
docxInfo= docxFile.core_properties
#Print the metadata which it wants to display
attribute = ["author", "category", "comments", "content_status",
"created", "identifier", "keywords", "language",
"last_modified_by", "last_printed", "modified",
"revision", "subject", "title", "version"]
#run the list in a for loop to print the value of each metadata
print ' - Document: ' + str(fileName)
for meta in attribute:
metadata = getattr(docxInfo,meta)
if metadata:
#Handle unicode and datetime values separately
if isinstance(metadata, unicode):
print " \n\t" + str(meta)+": " + str(metadata)
elif isinstance(metadata, datetime.datetime):
print " \n\t" + str(meta)+": " + str(metadata)
def Analyze_Metadata(filename):
#Check the extension to choose the right metadata analysis for the file type
ext=filename.lower().rsplit(".",1)[-1]
#print ext
if ext =="pdf":
#call the function analyze metadata pdf
Analyze_Metadata_pdf(filename)
if ((ext =="doc") or (ext=="docx")):
Analyze_Metadata_doc(filename)
####### FUNCTION DOWNLOADFILES ######
def Downloadfiles(urls_metadata):
print "\nDo you like downloading these files to analyze metadata(Y/N)?"
#try:
resp = raw_input().lower()
if (resp == 'n'):
print "Exiting"
exit(1)
if ((resp != 'y') and (resp != 'n')):
print "The option is not valided. Please, try again it"
if (resp =='y'):
print "Indicate the location where you want to keep the files downloaded"
path = raw_input()
try:
for url in urls_metadata:
try:
filename= wget.download(url,path)
Analyze_Metadata(filename)
except Exception, e:
print e
except:
pass
#********************************************************#
#Display the results and optionally export and download them
def ShowResults(newlist,num_files,target):
print "Files in the target "+target+" are:\n"
print "Files indexed:", len (urls_final)
for i in urls_final:
if i not in newlist:
newlist.append(i)
print i
#verify if the user wants to export results
if output == 'Y':
#Only it can enter if -j is put in the execution
ExportResults(newlist)
#Call to function to download the files
Downloadfiles(newlist)
#MAIN
parser = argparse.ArgumentParser(description='This script searches for files indexed by search engines for a domain to detect possible information leaks', formatter_class=RawTextHelpFormatter)
parser.add_argument('-d','--domain', help="The target domain to search",required=False)
parser.add_argument('-n','--search', help="Number of search iterations to perform",required=True)
parser.add_argument('-e','--ext', help='Display option:\n\t1-Search the domains where these files are found\n\t2-Search office files\n\n', required=True)
parser.add_argument('-f','--export', help='Export the results to a JSON file (Y/N)\n\n',required=False)
args = parser.parse_args()
print " _____ _ _ _ "
print " | __ \ | | | | | | "
print" | |__) |__ _ ___| |_| | ___ __ _| | __"
print" | _ // _` / __| __| | / _ \/ _` | |/ /"
print" | | \ \ (_| \__ \ |_| |___| __/ (_| | < "
print" |_| \_\__,_|___/\__|______\___|\__,_|_|\_\""
print "\n"
print """** Tool to automatic leak information using Bing Hacking
** Version 1.1
** Author: Ignacio Brihuega Rodriguez a.k.a N4xh4ck5
** DISCLAMER This tool was developed for educational goals.
** The author is not responsible for using to others goals.
** A high power, carries a high responsibility!"""
num_files=0
N = int (args.search)
target=args.domain
file_ext= int(args.ext)
output=args.export
if output is not None and output not in ('Y', 'N'):
print "The output option is not valid"
exit(1)
#Call design the dork
try:
num_files = DesignDork(N,file_ext)
except:
pass
newlist=[]
#Called the function to display the results
ShowResults(newlist,num_files,target)
| gpl-3.0 |
HenryHu/Turpial | turpial/ui/qt/updatebox.py | 2 | 17499 | # -*- coding: utf-8 -*-
# Qt update box for Turpial
from PyQt4.QtGui import QFont
from PyQt4.QtGui import QIcon
from PyQt4.QtGui import QLabel
from PyQt4.QtGui import QWidget
from PyQt4.QtGui import QPixmap
from PyQt4.QtGui import QComboBox
from PyQt4.QtGui import QTextEdit
from PyQt4.QtGui import QCompleter
from PyQt4.QtGui import QPushButton
from PyQt4.QtGui import QTextCursor
from PyQt4.QtGui import QVBoxLayout
from PyQt4.QtGui import QHBoxLayout
from PyQt4.QtGui import QFileDialog
from PyQt4.QtCore import Qt
from PyQt4.QtCore import QTimer
from PyQt4.QtCore import pyqtSignal
from turpial.ui.lang import i18n
from turpial.ui.base import BROADCAST_ACCOUNT
from turpial.ui.qt.widgets import ImageButton, ErrorLabel, BarLoadIndicator
from libturpial.common.tools import get_urls
from libturpial.common import get_username_from, get_protocol_from
MAX_CHAR = 140
class UpdateBox(QWidget):
def __init__(self, base):
QWidget.__init__(self)
self.base = base
self.showed = False
self.setFixedSize(500, 120)
self.text_edit = CompletionTextEdit()
self.upload_button = ImageButton(base, 'action-add-media.png',
i18n.get('add_photo'), borders=True)
self.short_button = ImageButton(base, 'action-shorten.png',
i18n.get('short_urls'), borders=True)
font = QFont()
font.setPointSize(18)
font.setBold(True)
self.char_count = QLabel('140')
self.char_count.setFont(font)
self.update_button = QPushButton(i18n.get('update'))
self.update_button.setToolTip(self.base.shortcuts.get('post').caption)
self.queue_button = QPushButton(i18n.get('add_to_queue'))
self.queue_button.setToolTip(self.base.shortcuts.get('add_to_queue').caption)
self.accounts_combo = QComboBox()
buttons = QHBoxLayout()
buttons.setSpacing(4)
buttons.addWidget(self.accounts_combo)
buttons.addWidget(self.upload_button)
buttons.addWidget(self.short_button)
buttons.addStretch(0)
buttons.addWidget(self.char_count)
buttons.addWidget(self.queue_button)
buttons.addWidget(self.update_button)
self.loader = BarLoadIndicator()
self.error_message = ErrorLabel()
self.media = None
self.preview_image = QLabel()
self.preview_image.setVisible(False)
text_edit_box = QHBoxLayout()
text_edit_box.addWidget(self.text_edit)
text_edit_box.addSpacing(5)
text_edit_box.addWidget(self.preview_image)
self.update_button.clicked.connect(self.__update_status)
self.queue_button.clicked.connect(self.__queue_status)
self.short_button.clicked.connect(self.__short_urls)
self.upload_button.clicked.connect(self.__media_clicked)
self.text_edit.textChanged.connect(self.__update_count)
self.text_edit.quit.connect(self.closeEvent)
self.text_edit.activated.connect(self.__update_status)
self.text_edit.enqueued.connect(self.__queue_status)
layout = QVBoxLayout()
layout.setSpacing(0)
#layout.addWidget(self.text_edit)
layout.addLayout(text_edit_box)
layout.addWidget(self.loader)
layout.addSpacing(5)
layout.addWidget(self.error_message)
layout.addLayout(buttons)
layout.setContentsMargins(5, 5, 5, 5)
self.setLayout(layout)
self.__clear()
def __count_chars(self):
message = self.text_edit.toPlainText()
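# Twitter wraps every link with t.co, so each URL counts as a fixed 23 characters regardless of its real length.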
urls = [str(url) for url in get_urls(message) if len(url) > 23]
for url in urls:
message = message.replace(url, '0' * 23)
if self.media:
message += '0' * 23
return MAX_CHAR - len(message)
def __update_count(self):
remaining_chars = self.__count_chars()
if remaining_chars < 0:
self.char_count.setStyleSheet("QLabel { color: #D40D12 }")
elif remaining_chars <= 10:
self.char_count.setStyleSheet("QLabel { color: #D4790D }")
else:
self.char_count.setStyleSheet("QLabel { color: #000000 }")
self.char_count.setText(str(remaining_chars))
def __validate(self, message, accounts, index):
if len(message) == 0:
self.error(i18n.get('you_can_not_submit_an_empty_message'))
return False
if index == 0 and len(accounts) > 1:
self.error(i18n.get('select_an_account_before_post'))
return False
if self.__count_chars() < 0:
self.error(i18n.get('message_too_long'))
return False
index = self.accounts_combo.currentIndex()
account_id = str(self.accounts_combo.itemData(index).toPyObject())
if self.media and account_id == BROADCAST_ACCOUNT:
self.error(i18n.get('broadcast_status_with_media_not_supported'))
return False
return True
def __short_urls(self):
self.enable(False)
message = unicode(self.text_edit.toPlainText())
self.base.short_urls(message)
def __media_clicked(self):
if self.media:
self.__remove_media()
else:
self.__upload_media()
def __upload_media(self):
filename = str(QFileDialog.getOpenFileName(self, i18n.get('upload_image'),
self.base.home_path))
if filename != '':
self.media = filename
pix = QPixmap(filename)
scaled_pix = pix.scaled(100, 100, Qt.KeepAspectRatio)
self.preview_image.setPixmap(scaled_pix)
self.preview_image.setVisible(True)
self.upload_button.change_icon('action-remove-media.png')
self.upload_button.setToolTip(i18n.get('remove_photo'))
self.queue_button.setEnabled(False)
self.__update_count()
def __remove_media(self):
self.media = None
self.preview_image.setPixmap(QPixmap())
self.preview_image.setVisible(False)
self.upload_button.change_icon('action-add-media.png')
self.upload_button.setToolTip(i18n.get('add_photo'))
self.queue_button.setEnabled(True)
def __update_status(self):
index = self.accounts_combo.currentIndex()
accounts = self.base.core.get_registered_accounts()
message = unicode(self.text_edit.toPlainText())
if not self.__validate(message, accounts, index):
self.enable(True)
return
self.enable(False)
account_id = str(self.accounts_combo.itemData(index).toPyObject())
if self.direct_message_to:
self.base.send_direct_message(account_id, self.direct_message_to, message)
else:
if account_id == 'broadcast':
self.base.broadcast_status(message)
else:
if self.media:
self.base.update_status_with_media(account_id, message, self.media,
self.in_reply_to_id)
else:
self.base.update_status(account_id, message, self.in_reply_to_id)
def __queue_status(self):
index = self.accounts_combo.currentIndex()
accounts = self.base.core.get_registered_accounts()
account_id = str(self.accounts_combo.itemData(index).toPyObject())
message = unicode(self.text_edit.toPlainText())
if not self.__validate(message, accounts, index):
self.enable(True)
return
self.enable(False)
self.base.push_status_to_queue(account_id, message)
def __clear(self):
self.account_id = None
self.in_reply_to_id = None
self.in_reply_to_user = None
self.direct_message_to = None
self.quoting = False
self.message = None
self.media = None
self.cursor_position = None
self.text_edit.setText('')
self.accounts_combo.setCurrentIndex(0)
self.queue_button.setEnabled(True)
self.loader.setVisible(False)
self.preview_image.setPixmap(QPixmap())
self.preview_image.setVisible(False)
self.upload_button.change_icon('action-add-media.png')
self.upload_button.setToolTip(i18n.get('add_photo'))
self.error_message.setVisible(False)
self.error_message.setText('')
self.enable(True)
self.showed = False
def __show(self):
self.update_friends_list()
short_service = self.base.get_shorten_url_service()
short_tooltip = "%s (%s)" % (i18n.get('short_url'), short_service)
self.short_button.setToolTip(short_tooltip)
upload_service = self.base.get_upload_media_service()
upload_tooltip = "%s (%s)" % (i18n.get('upload_image'), upload_service)
self.upload_button.setToolTip(upload_tooltip)
self.accounts_combo.clear()
accounts = self.base.core.get_registered_accounts()
if len(accounts) > 1:
self.accounts_combo.addItem('--', '')
for account in accounts:
protocol = get_protocol_from(account.id_)
icon = QIcon(self.base.get_image_path('%s.png' % protocol))
self.accounts_combo.addItem(icon, get_username_from(account.id_), account.id_)
if len(accounts) > 1:
icon = QIcon(self.base.get_image_path('action-conversation.png'))
self.accounts_combo.addItem(icon, i18n.get('broadcast'), 'broadcast')
if self.account_id:
index = self.accounts_combo.findData(self.account_id)
if index > 0:
self.accounts_combo.setCurrentIndex(index)
self.accounts_combo.setEnabled(False)
if self.message:
self.text_edit.setText(self.message)
cursor = self.text_edit.textCursor()
cursor.movePosition(self.cursor_position, QTextCursor.MoveAnchor)
self.text_edit.setTextCursor(cursor)
QWidget.show(self)
self.showed = True
def __on_timeout(self):
self.error_message.setText('')
self.error_message.setVisible(False)
def show(self):
if self.showed:
return self.raise_()
self.setWindowTitle(i18n.get('whats_happening'))
self.__show()
def show_for_reply(self, account_id, status):
if self.showed:
return self.raise_()
title = "%s @%s" % (i18n.get('reply_to'), status.username)
self.setWindowTitle(title)
self.account_id = account_id
self.in_reply_to_id = status.id_
self.in_reply_to_user = status.username
mentions = ' '.join(["@%s" % user for user in status.get_mentions()])
self.message = "%s " % mentions
self.cursor_position = QTextCursor.End
self.__show()
def show_for_send_direct(self, account_id, username):
if self.showed:
return self.raise_()
title = "%s @%s" % (i18n.get('send_message_to'), username)
self.setWindowTitle(title)
self.account_id = account_id
self.direct_message_to = username
self.__show()
self.queue_button.setEnabled(False)
def show_for_reply_direct(self, account_id, status):
if self.showed:
return self.raise_()
title = "%s @%s" % (i18n.get('send_message_to'), status.username)
self.setWindowTitle(title)
self.account_id = account_id
self.direct_message_to = status.username
self.__show()
self.queue_button.setEnabled(False)
def show_for_quote(self, account_id, status):
if self.showed:
return self.raise_()
self.setWindowTitle(i18n.get('quoting'))
self.account_id = account_id
self.message = " RT @%s %s" % (status.username, status.text)
self.cursor_position = QTextCursor.Start
self.quoting = True
self.__show()
self.queue_button.setEnabled(False)
def closeEvent(self, event=None):
message = unicode(self.text_edit.toPlainText())
if len(message) > 0:
confirmation = self.base.show_confirmation_message(i18n.get('confirm_discard'),
i18n.get('do_you_want_to_discard_message'))
if not confirmation:
return
if event:
event.ignore()
self.done()
def enable(self, value):
self.text_edit.setEnabled(value)
if not self.account_id:
self.accounts_combo.setEnabled(value)
if self.in_reply_to_id or self.direct_message_to or self.quoting:
self.queue_button.setEnabled(False)
else:
self.queue_button.setEnabled(value)
self.upload_button.setEnabled(value)
self.short_button.setEnabled(value)
self.update_button.setEnabled(value)
self.loader.setVisible(not value)
def done(self):
self.__clear()
self.hide()
def error(self, message, response=None):
if response is not None:
message = self.base.get_error_message_from_response(response, message)
self.enable(True)
self.error_message.setText(message)
self.error_message.setVisible(True)
self.timer = QTimer()
self.timer.timeout.connect(self.__on_timeout)
self.timer.start(5000)
def after_short_url(self, message):
if self.base.is_exception(message):
self.error(i18n.get('error_shorting_url'))
else:
self.text_edit.setText(message)
self.enable(True)
def after_upload_media(self, media_url):
if self.base.is_exception(media_url):
self.error(i18n.get('error_uploading_image'))
else:
text_cursor = self.text_edit.textCursor()
text_cursor.select(QTextCursor.WordUnderCursor)
if text_cursor.selectedText() != '':
media_url = " %s" % media_url
text_cursor.clearSelection()
text_cursor.insertText(media_url)
self.text_edit.setTextCursor(text_cursor)
self.enable(True)
def update_friends_list(self):
completer = QCompleter(self.base.load_friends_list_with_extras())
self.text_edit.setCompleter(completer)
class CompletionTextEdit(QTextEdit):
IGNORED_KEYS = (
Qt.Key_Enter,
Qt.Key_Return,
Qt.Key_Escape,
Qt.Key_Tab,
Qt.Key_Backtab
)
quit = pyqtSignal()
activated = pyqtSignal()
enqueued = pyqtSignal()
def __init__(self):
QTextEdit.__init__(self)
self.completer = None
self.setAcceptRichText(False)
self.setTabChangesFocus(True)
def setCompleter(self, completer):
if self.completer:
self.completer.activated.disconnect()
self.completer = completer
self.completer.setCaseSensitivity(Qt.CaseInsensitive)
self.completer.setWidget(self)
self.completer.activated.connect(self.insertCompletion)
def insertCompletion(self, completion):
if self.completer.widget() != self:
return
tc = self.textCursor()
extra = (completion.length() - self.completer.completionPrefix().length())
for i in range(self.completer.completionPrefix().length()):
tc.deletePreviousChar()
tc.insertText("%s " % str(completion))
self.setTextCursor(tc)
def textUnderCursor(self):
tc = self.textCursor()
text = ""
while True:
tc.movePosition(QTextCursor.Left, QTextCursor.KeepAnchor)
text = tc.selectedText()
if tc.position() == 0:
break
if text.startsWith(' '):
text = text[1:]
break
return text
def focusInEvent(self, event):
if self.completer:
self.completer.setWidget(self)
QTextEdit.focusInEvent(self, event)
def keyPressEvent(self, event):
if self.completer and self.completer.popup().isVisible():
if event.key() in self.IGNORED_KEYS:
#event.ignore()
return
if event.key() == Qt.Key_Escape:
self.quit.emit()
return
hasModifier = event.modifiers() != Qt.NoModifier
enterKey = event.key() == Qt.Key_Enter or event.key() == Qt.Key_Return
queueKey = event.key() == Qt.Key_P
if hasModifier and event.modifiers() == Qt.ControlModifier and enterKey:
self.activated.emit()
return
if hasModifier and event.modifiers() == Qt.ControlModifier and queueKey:
self.enqueued.emit()
return
QTextEdit.keyPressEvent(self, event)
completionPrefix = self.textUnderCursor()
#print completionPrefix.decode('utf-8')
if hasModifier or event.text().isEmpty() or not completionPrefix.startsWith('@'):
self.completer.popup().hide()
return
if completionPrefix.startsWith('@') and completionPrefix[1:] != self.completer.completionPrefix():
self.completer.setCompletionPrefix(completionPrefix[1:])
popup = self.completer.popup()
popup.setCurrentIndex(self.completer.completionModel().index(0, 0))
cursor_rect = self.cursorRect()
cursor_rect.setWidth(self.completer.popup().sizeHintForColumn(0)
+ self.completer.popup().verticalScrollBar().sizeHint().width())
self.completer.complete(cursor_rect)
| gpl-3.0 |
Hawstein/xhtml2pdf | xhtml2pdf/pdf.py | 37 | 1983 | # -*- coding: utf-8 -*-
# Copyright 2010 Dirk Holtwick, holtwick.it
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from xhtml2pdf.util import pisaTempFile, getFile
import logging
log = logging.getLogger("xhtml2pdf")
class pisaPDF:
def __init__(self, capacity=-1):
self.capacity = capacity
self.files = []
def addFromURI(self, url, basepath=None):
obj = getFile(url, basepath)
if obj and (not obj.notFound()):
self.files.append(obj.getFile())
addFromFileName = addFromURI
def addFromFile(self, f):
# file-like objects are stored directly; anything else is resolved as a URI/path
if hasattr(f, "read"):
self.files.append(f)
else:
self.addFromURI(f)
def addFromString(self, data):
self.files.append(pisaTempFile(data, capacity=self.capacity))
def addDocument(self, doc):
if hasattr(doc.dest, "read"):
self.files.append(doc.dest)
def join(self, file=None):
import pyPdf # TODO: Why is this in the middle of everything?
if pyPdf:
output = pyPdf.PdfFileWriter()
for pdffile in self.files:
input = pyPdf.PdfFileReader(pdffile)
for pageNumber in xrange(input.getNumPages()):
output.addPage(input.getPage(pageNumber))
if file is not None:
output.write(file)
return file
out = pisaTempFile(capacity=self.capacity)
output.write(out)
return out.getvalue()
getvalue = join
__str__ = join
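# Illustrative usage sketch (names assumed, based only on the methods above):
#   pdf = pisaPDF()
#   pdf.addFromFileName("a.pdf")
#   pdf.addFromFileName("b.pdf")
#   merged = pdf.join()  # returns the merged PDF data when no file is given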
| apache-2.0 |
mnahm5/django-estore | Lib/site-packages/django/core/cache/backends/dummy.py | 629 | 1213 | "Dummy cache backend"
from django.core.cache.backends.base import DEFAULT_TIMEOUT, BaseCache
class DummyCache(BaseCache):
def __init__(self, host, *args, **kwargs):
BaseCache.__init__(self, *args, **kwargs)
def add(self, key, value, timeout=DEFAULT_TIMEOUT, version=None):
key = self.make_key(key, version=version)
self.validate_key(key)
return True
def get(self, key, default=None, version=None):
key = self.make_key(key, version=version)
self.validate_key(key)
return default
def set(self, key, value, timeout=DEFAULT_TIMEOUT, version=None):
key = self.make_key(key, version=version)
self.validate_key(key)
def delete(self, key, version=None):
key = self.make_key(key, version=version)
self.validate_key(key)
def get_many(self, keys, version=None):
return {}
def has_key(self, key, version=None):
key = self.make_key(key, version=version)
self.validate_key(key)
return False
def set_many(self, data, timeout=DEFAULT_TIMEOUT, version=None):
pass
def delete_many(self, keys, version=None):
pass
def clear(self):
pass
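# Minimal settings sketch for enabling this backend in a Django project (standard configuration, not part of this module):
# CACHES = {
#     'default': {
#         'BACKEND': 'django.core.cache.backends.dummy.DummyCache',
#     }
# }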
| mit |
bspink/django | django/template/loaders/filesystem.py | 418 | 2158 | """
Wrapper for loading templates from the filesystem.
"""
import errno
import io
import warnings
from django.core.exceptions import SuspiciousFileOperation
from django.template import Origin, TemplateDoesNotExist
from django.utils._os import safe_join
from django.utils.deprecation import RemovedInDjango20Warning
from .base import Loader as BaseLoader
class Loader(BaseLoader):
def get_dirs(self):
return self.engine.dirs
def get_contents(self, origin):
try:
with io.open(origin.name, encoding=self.engine.file_charset) as fp:
return fp.read()
except IOError as e:
if e.errno == errno.ENOENT:
raise TemplateDoesNotExist(origin)
raise
def get_template_sources(self, template_name, template_dirs=None):
"""
Return an Origin object pointing to an absolute path in each directory
in template_dirs. For security reasons, if a path doesn't lie inside
one of the template_dirs it is excluded from the result set.
"""
if not template_dirs:
template_dirs = self.get_dirs()
for template_dir in template_dirs:
try:
name = safe_join(template_dir, template_name)
except SuspiciousFileOperation:
# The joined path was located outside of this template_dir
# (it might be inside another one, so this isn't fatal).
continue
yield Origin(
name=name,
template_name=template_name,
loader=self,
)
def load_template_source(self, template_name, template_dirs=None):
warnings.warn(
'The load_template_source() method is deprecated. Use '
'get_template() or get_contents() instead.',
RemovedInDjango20Warning,
)
for origin in self.get_template_sources(template_name, template_dirs):
try:
return self.get_contents(origin), origin.name
except TemplateDoesNotExist:
pass
raise TemplateDoesNotExist(template_name)
| bsd-3-clause |
DataDog/integrations-core | gitlab/tests/test_integration.py | 1 | 2728 | # (C) Datadog, Inc. 2018-present
# All rights reserved
# Licensed under a 3-clause BSD style license (see LICENSE)
import mock
import pytest
from requests.exceptions import ConnectionError
from datadog_checks.gitlab import GitlabCheck
from .common import AUTH_CONFIG, BAD_CONFIG, CONFIG, CUSTOM_TAGS, HOST, METRICS, assert_check
pytestmark = [pytest.mark.usefixtures("dd_environment"), pytest.mark.integration]
def test_connection_failure(aggregator):
"""
Make sure we're failing when the URL isn't right
"""
gitlab = GitlabCheck('gitlab', BAD_CONFIG['init_config'], instances=BAD_CONFIG['instances'])
with pytest.raises(ConnectionError):
gitlab.check(BAD_CONFIG['instances'][0])
# We should get only one failed service check, the first
aggregator.assert_service_check(
'gitlab.{}'.format(GitlabCheck.ALLOWED_SERVICE_CHECKS[0]),
status=GitlabCheck.CRITICAL,
tags=['gitlab_host:{}'.format(HOST), 'gitlab_port:1234'] + CUSTOM_TAGS,
count=1,
)
@pytest.mark.parametrize(
'raw_version, version_metadata, count',
[
pytest.param(
'12.7.6',
{
'version.scheme': 'semver',
'version.major': '12',
'version.minor': '7',
'version.patch': '6',
'version.raw': '12.7.6',
},
5,
),
pytest.param(
'1.4.5',
{
'version.scheme': 'semver',
'version.major': '1',
'version.minor': '4',
'version.patch': '5',
'version.raw': '1.4.5',
},
5,
),
],
)
def test_check_submit_metadata(aggregator, datadog_agent, raw_version, version_metadata, count):
with mock.patch('datadog_checks.base.utils.http.requests.Response.json') as g:
# mock the api call so that it returns the given version
g.return_value = {"version": raw_version}
datadog_agent.reset()
instance = AUTH_CONFIG['instances'][0]
init_config = AUTH_CONFIG['init_config']
gitlab = GitlabCheck('gitlab', init_config, instances=[instance])
gitlab.check_id = 'test:123'
gitlab.check(instance)
datadog_agent.assert_metadata('test:123', version_metadata)
datadog_agent.assert_metadata_count(count)
def test_check_integration(aggregator, mock_data):
instance = CONFIG['instances'][0]
init_config = CONFIG['init_config']
gitlab = GitlabCheck('gitlab', init_config, instances=[instance])
gitlab.check(instance)
gitlab.check(instance)
assert_check(aggregator, METRICS)
aggregator.assert_all_metrics_covered()
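# --- Usage sketch (editor's addition, assumptions flagged) ---
# These tests rely on the dd_environment fixture provided by the repo's
# conftest (see the pytestmark above). In integrations-core they are
# typically run through the ddev tooling, e.g. `ddev test gitlab`, rather
# than bare pytest; the exact invocation is an assumption.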
| bsd-3-clause |
mharding01/herbpy | src/herbpy/tsr/bowl.py | 1 | 2604 | import numpy
from prpy.tsr.tsrlibrary import TSRFactory
from prpy.tsr.tsr import *
@TSRFactory('herb', 'plastic_bowl', 'grasp')
def bowl_grasp(robot, bowl, manip=None):
'''
@param robot The robot performing the grasp
@param bowl The bowl to grasp
@param manip The manipulator to perform the grasp,
if None the active manipulator on the robot is used
'''
if manip is None:
manip_idx = robot.GetActiveManipulatorIndex()
else:
with manip.GetRobot():
manip.SetActive()
manip_idx = manip.GetRobot().GetActiveManipulatorIndex()
T0_w = bowl.GetTransform()
Tw_e = numpy.array([[1., 0., 0., 0.08],
[0., -1., 0., 0.],
[0., 0., -1., 0.34],
[0., 0., 0., 1.]])
Bw = numpy.zeros((6,2))
    Bw[2,:] = [-0.02, 0.02] # Allow a little vertical movement
Bw[5,:] = [-numpy.pi, numpy.pi] # Allow any orientation
grasp_tsr = TSR(T0_w = T0_w, Tw_e = Tw_e, Bw = Bw, manip = manip_idx)
grasp_chain = TSRChain(sample_start=False, sample_goal = True, constrain=False, TSR = grasp_tsr)
return [grasp_chain]
@TSRFactory('herb', 'plastic_bowl', 'place')
def bowl_on_table(robot, bowl, pose_tsr_chain, manip=None):
'''
Generates end-effector poses for placing the bowl on the table.
This factory assumes the bowl is grasped at the time it is called.
@param robot The robot grasping the bowl
@param bowl The grasped object
@param pose_tsr_chain The tsr chain for sampling placement poses for the bowl
@param manip The manipulator grasping the object, if None the active
manipulator of the robot is used
'''
if manip is None:
manip_idx = robot.GetActiveManipulatorIndex()
manip = robot.GetActiveManipulator()
else:
with manip.GetRobot():
manip.SetActive()
manip_idx = manip.GetRobot().GetActiveManipulatorIndex()
ee_in_bowl = numpy.dot(numpy.linalg.inv(bowl.GetTransform()), manip.GetEndEffectorTransform())
Bw = numpy.zeros((6,2))
Bw[2,:] = [0., 0.08] # Allow some vertical movement
for tsr in pose_tsr_chain.TSRs:
if tsr.manipindex != manip_idx:
raise Exception('pose_tsr_chain defined for a different manipulator.')
grasp_tsr = TSR(Tw_e = ee_in_bowl, Bw = Bw, manip = manip_idx)
all_tsrs = list(pose_tsr_chain.TSRs) + [grasp_tsr]
place_chain = TSRChain(sample_start = False, sample_goal = True, constrain = False,
TSRs = all_tsrs)
return [ place_chain ]
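# --- Illustrative sketch (editor's addition, not part of herbpy) ---
# The Bw matrices above are 6x2 [min, max] bounds over the TSR frame's
# (x, y, z, roll, pitch, yaw). A minimal standalone example, assuming
# only numpy:
if __name__ == '__main__':
    demo_Bw = numpy.zeros((6, 2))
    demo_Bw[2, :] = [-0.02, 0.02]           # 4 cm of vertical slack
    demo_Bw[5, :] = [-numpy.pi, numpy.pi]   # any rotation about z
    print(demo_Bw)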
| bsd-3-clause |
tsilva/appier_scaffold | src/controllers/base.py | 1 | 7534 | #!/usr/bin/python
# -*- coding: utf-8 -*-
import appier
import models
class BaseController(appier.Controller):
@appier.route("/", "GET")
def index(self):
account_s = models.Account.get_from_session(raise_e = False)
success = self.field("success")
return self.template(
"index.html.tpl",
link = "home",
account_s = account_s,
success = success
)
@appier.route("/robots.txt", "GET")
def robots(self):
return self.template(
"robots.txt"
)
@appier.route("/login", "GET")
def login(self):
account_s = models.Account.get_from_session(raise_e = False)
success = self.field("success")
error = self.field("error")
next = self.field("next")
return self.template(
"signin.html.tpl",
link = "signin",
account_s = account_s,
success = success,
error = error,
next = next
)
@appier.route("/login", "POST")
def login_post(self):
# retrieves the session account
account_s = models.Account.get_from_session(raise_e = False)
# retrieves the auth parameters
email = self.field("email")
password = self.field("password")
next = self.field("next")
# performs the login
try: account = models.Account.login(email, password)
except appier.exceptions.OperationalError, error:
return self.template(
"signin.html.tpl",
account_s = account_s,
email = email,
next = next,
error = error.message
)
# sets the account in the session
account.set_in_session()
# redirects to the correct url
if not next: next = self.url_for("base.index")
return self.redirect(next)
@appier.route("/logout", ("GET", "POST"))
def logout(self):
models.Account.clear_session()
return self.redirect(
self.url_for("base.index")
)
@appier.route("/signup", "GET")
def signup_get(self):
account_s = models.Account.get_from_session(raise_e = False)
success = self.field("success")
return self.template(
"signup.html.tpl",
link = "signup",
account_s = account_s,
account = {},
errors = {},
success = success
)
@appier.route("/signup", "POST")
def signup_post(self):
# retrieves the session account
account_s = models.Account.get_from_session(raise_e = False)
# attempts to create a new account
account = models.Account.new()
try: account.save()
except appier.exceptions.ValidationError, error:
return self.template(
"signup.html.tpl",
account_s = account_s,
account = error.model,
errors = error.errors
)
# sends the signup email
account.send_signup_email_s()
return self.redirect(
self.url_for(
"base.signup_get",
success = "An email was sent with instructions on how to confirm your account and login."
)
)
@appier.route("/confirm", "GET")
def confirm(self):
confirmation_code = self.field("confirmation_code")
models.Account.confirm_s(confirmation_code)
return self.redirect(
self.url_for(
"base.index",
success = "Your account was confirmed successfully."
)
)
@appier.route("/recover", "GET")
def recover_get(self):
account_s = models.Account.get_from_session(raise_e = False)
success = self.field("success")
return self.template(
"recover.html.tpl",
link = "recover",
account_s = account_s,
success = success
)
@appier.route("/recover", "POST")
def recover_post(self):
# retrieves the session account
account_s = models.Account.get_from_session(raise_e = False)
# attempts to retrieve the specified account
email = self.field("email")
try: account = models.Account.get(email = email)
except: return self.template(
"recover.html.tpl",
account_s = account_s,
email = email,
error = "No matching account found"
)
# sends the recovery email
account.send_recovery_email_s()
# redirects back to the recover
# page with a success message
return self.redirect(
self.url_for(
"base.recover_get",
success = "An email was sent with instructions on how to recover your account."
)
)
@appier.route("/reset", "GET")
def reset_get(self):
account_s = models.Account.get_from_session(raise_e = False)
reset_code = self.field("reset_code")
models.Account.get(reset_code = reset_code)
return self.template(
"reset.html.tpl",
link = "reset",
account_s = account_s,
reset_code = reset_code,
account = {},
errors = {}
)
@appier.route("/reset", "POST")
def reset_post(self):
# retrieves the session account
account_s = models.Account.get_from_session(raise_e = False)
# resets the account with the provided reset code
reset_code = self.field("reset_code")
try: models.Account.reset_s(reset_code)
except appier.exceptions.ValidationError, error:
return self.template(
"reset.html.tpl",
account_s = account_s,
reset_code = reset_code,
account = error.model,
errors = error.errors,
error = "Please insert a valid password (more than 5 characters, lowercase, no spaces, starting with a letter)"
)
# redirects to the signin page
return self.redirect(
self.url_for(
"base.login",
success = "Your password was changed successfully."
)
)
@appier.route("/about", "GET")
def about(self):
account_s = models.Account.get_from_session(raise_e = False)
return self.template(
"about.html.tpl",
link = "about",
account_s = account_s
)
@appier.error_handler(403)
def error_403(self, error):
return self.redirect(
self.url_for(
"base.login",
error = "Unauthorized, please login first."
)
)
@appier.error_handler(404)
def error_404(self, error):
account_s = models.Account.get_from_session(raise_e = False)
return self.template(
"error_404.html.tpl",
account_s = account_s
)
@appier.error_handler(500)
def error_500(self, error):
account_s = models.Account.get_from_session(raise_e = False)
return self.template(
"error_500.html.tpl",
account_s = account_s
)
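# --- Usage sketch (editor's addition, assumptions flagged) ---
# This controller is route-driven: appier dispatches requests according to
# the @appier.route decorators above. Wiring it into a served app is
# project-specific; a typical appier bootstrap is assumed to look roughly
# like the following, though the scaffold's actual entry point is not
# shown in this file:
#
#     import appier
#     app = appier.App()   # assumed entry point that registers controllers
#     app.serve()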
| apache-2.0 |
kevalds51/sympy | sympy/printing/mathml.py | 58 | 16250 | """
A MathML printer.
"""
from __future__ import print_function, division
from sympy import sympify, S, Mul
from sympy.core.function import _coeff_isneg
from sympy.core.alphabets import greeks
from sympy.core.compatibility import u, range
from .printer import Printer
from .pretty.pretty_symbology import greek_unicode
from .conventions import split_super_sub, requires_partial
class MathMLPrinter(Printer):
"""Prints an expression to the MathML markup language
Whenever possible tries to use Content markup and not Presentation markup.
References: http://www.w3.org/TR/MathML2/
"""
printmethod = "_mathml"
_default_settings = {
"order": None,
"encoding": "utf-8"
}
def __init__(self, settings=None):
Printer.__init__(self, settings)
from xml.dom.minidom import Document
self.dom = Document()
def doprint(self, expr):
"""
Prints the expression as MathML.
"""
mathML = Printer._print(self, expr)
unistr = mathML.toxml()
xmlbstr = unistr.encode('ascii', 'xmlcharrefreplace')
res = xmlbstr.decode()
return res
def mathml_tag(self, e):
"""Returns the MathML tag for an expression."""
translate = {
'Add': 'plus',
'Mul': 'times',
'Derivative': 'diff',
'Number': 'cn',
'int': 'cn',
'Pow': 'power',
'Symbol': 'ci',
'Integral': 'int',
'Sum': 'sum',
'sin': 'sin',
'cos': 'cos',
'tan': 'tan',
'cot': 'cot',
'asin': 'arcsin',
'asinh': 'arcsinh',
'acos': 'arccos',
'acosh': 'arccosh',
'atan': 'arctan',
'atanh': 'arctanh',
'acot': 'arccot',
'atan2': 'arctan',
'log': 'ln',
'Equality': 'eq',
'Unequality': 'neq',
'GreaterThan': 'geq',
'LessThan': 'leq',
'StrictGreaterThan': 'gt',
'StrictLessThan': 'lt',
}
for cls in e.__class__.__mro__:
n = cls.__name__
if n in translate:
return translate[n]
# Not found in the MRO set
n = e.__class__.__name__
return n.lower()
def _print_Mul(self, expr):
if _coeff_isneg(expr):
x = self.dom.createElement('apply')
x.appendChild(self.dom.createElement('minus'))
x.appendChild(self._print_Mul(-expr))
return x
from sympy.simplify import fraction
numer, denom = fraction(expr)
if denom is not S.One:
x = self.dom.createElement('apply')
x.appendChild(self.dom.createElement('divide'))
x.appendChild(self._print(numer))
x.appendChild(self._print(denom))
return x
coeff, terms = expr.as_coeff_mul()
if coeff is S.One and len(terms) == 1:
# XXX since the negative coefficient has been handled, I don't
            # think a coeff of 1 can remain
return self._print(terms[0])
if self.order != 'old':
terms = Mul._from_args(terms).as_ordered_factors()
x = self.dom.createElement('apply')
x.appendChild(self.dom.createElement('times'))
if(coeff != 1):
x.appendChild(self._print(coeff))
for term in terms:
x.appendChild(self._print(term))
return x
def _print_Add(self, expr, order=None):
args = self._as_ordered_terms(expr, order=order)
lastProcessed = self._print(args[0])
plusNodes = []
for arg in args[1:]:
if _coeff_isneg(arg):
#use minus
x = self.dom.createElement('apply')
x.appendChild(self.dom.createElement('minus'))
x.appendChild(lastProcessed)
x.appendChild(self._print(-arg))
#invert expression since this is now minused
lastProcessed = x
if(arg == args[-1]):
plusNodes.append(lastProcessed)
else:
plusNodes.append(lastProcessed)
lastProcessed = self._print(arg)
if(arg == args[-1]):
plusNodes.append(self._print(arg))
if len(plusNodes) == 1:
return lastProcessed
x = self.dom.createElement('apply')
x.appendChild(self.dom.createElement('plus'))
while len(plusNodes) > 0:
x.appendChild(plusNodes.pop(0))
return x
def _print_MatrixBase(self, m):
x = self.dom.createElement('matrix')
        for i in range(m.rows):
x_r = self.dom.createElement('matrixrow')
for j in range(m.cols):
x_r.appendChild(self._print(m[i, j]))
x.appendChild(x_r)
return x
def _print_Rational(self, e):
if e.q == 1:
#don't divide
x = self.dom.createElement('cn')
x.appendChild(self.dom.createTextNode(str(e.p)))
return x
x = self.dom.createElement('apply')
x.appendChild(self.dom.createElement('divide'))
#numerator
xnum = self.dom.createElement('cn')
xnum.appendChild(self.dom.createTextNode(str(e.p)))
        #denominator
xdenom = self.dom.createElement('cn')
xdenom.appendChild(self.dom.createTextNode(str(e.q)))
x.appendChild(xnum)
x.appendChild(xdenom)
return x
def _print_Limit(self, e):
x = self.dom.createElement('apply')
x.appendChild(self.dom.createElement(self.mathml_tag(e)))
x_1 = self.dom.createElement('bvar')
x_2 = self.dom.createElement('lowlimit')
x_1.appendChild(self._print(e.args[1]))
x_2.appendChild(self._print(e.args[2]))
x.appendChild(x_1)
x.appendChild(x_2)
x.appendChild(self._print(e.args[0]))
return x
def _print_ImaginaryUnit(self, e):
return self.dom.createElement('imaginaryi')
def _print_EulerGamma(self, e):
return self.dom.createElement('eulergamma')
def _print_GoldenRatio(self, e):
"""We use unicode #x3c6 for Greek letter phi as defined here
http://www.w3.org/2003/entities/2007doc/isogrk1.html"""
x = self.dom.createElement('cn')
x.appendChild(self.dom.createTextNode(u("\N{GREEK SMALL LETTER PHI}")))
return x
def _print_Exp1(self, e):
return self.dom.createElement('exponentiale')
def _print_Pi(self, e):
return self.dom.createElement('pi')
def _print_Infinity(self, e):
return self.dom.createElement('infinity')
    def _print_NegativeInfinity(self, e):
x = self.dom.createElement('apply')
x.appendChild(self.dom.createElement('minus'))
x.appendChild(self.dom.createElement('infinity'))
return x
def _print_Integral(self, e):
def lime_recur(limits):
x = self.dom.createElement('apply')
x.appendChild(self.dom.createElement(self.mathml_tag(e)))
bvar_elem = self.dom.createElement('bvar')
bvar_elem.appendChild(self._print(limits[0][0]))
x.appendChild(bvar_elem)
if len(limits[0]) == 3:
low_elem = self.dom.createElement('lowlimit')
low_elem.appendChild(self._print(limits[0][1]))
x.appendChild(low_elem)
up_elem = self.dom.createElement('uplimit')
up_elem.appendChild(self._print(limits[0][2]))
x.appendChild(up_elem)
if len(limits[0]) == 2:
up_elem = self.dom.createElement('uplimit')
up_elem.appendChild(self._print(limits[0][1]))
x.appendChild(up_elem)
if len(limits) == 1:
x.appendChild(self._print(e.function))
else:
x.appendChild(lime_recur(limits[1:]))
return x
limits = list(e.limits)
limits.reverse()
return lime_recur(limits)
def _print_Sum(self, e):
# Printer can be shared because Sum and Integral have the
# same internal representation.
return self._print_Integral(e)
def _print_Symbol(self, sym):
ci = self.dom.createElement(self.mathml_tag(sym))
def join(items):
if len(items) > 1:
mrow = self.dom.createElement('mml:mrow')
for i, item in enumerate(items):
if i > 0:
mo = self.dom.createElement('mml:mo')
mo.appendChild(self.dom.createTextNode(" "))
mrow.appendChild(mo)
mi = self.dom.createElement('mml:mi')
mi.appendChild(self.dom.createTextNode(item))
mrow.appendChild(mi)
return mrow
else:
mi = self.dom.createElement('mml:mi')
mi.appendChild(self.dom.createTextNode(items[0]))
return mi
# translate name, supers and subs to unicode characters
greek_letters = set(greeks) # make a copy
def translate(s):
if s in greek_unicode:
return greek_unicode.get(s)
else:
return s
name, supers, subs = split_super_sub(sym.name)
name = translate(name)
supers = [translate(sup) for sup in supers]
subs = [translate(sub) for sub in subs]
mname = self.dom.createElement('mml:mi')
mname.appendChild(self.dom.createTextNode(name))
if len(supers) == 0:
if len(subs) == 0:
ci.appendChild(self.dom.createTextNode(name))
else:
msub = self.dom.createElement('mml:msub')
msub.appendChild(mname)
msub.appendChild(join(subs))
ci.appendChild(msub)
else:
if len(subs) == 0:
msup = self.dom.createElement('mml:msup')
msup.appendChild(mname)
msup.appendChild(join(supers))
ci.appendChild(msup)
else:
msubsup = self.dom.createElement('mml:msubsup')
msubsup.appendChild(mname)
msubsup.appendChild(join(subs))
msubsup.appendChild(join(supers))
ci.appendChild(msubsup)
return ci
def _print_Pow(self, e):
#Here we use root instead of power if the exponent is the reciprocal of an integer
if e.exp.is_Rational and e.exp.p == 1:
x = self.dom.createElement('apply')
x.appendChild(self.dom.createElement('root'))
if e.exp.q != 2:
xmldeg = self.dom.createElement('degree')
xmlci = self.dom.createElement('ci')
xmlci.appendChild(self.dom.createTextNode(str(e.exp.q)))
xmldeg.appendChild(xmlci)
x.appendChild(xmldeg)
x.appendChild(self._print(e.base))
return x
x = self.dom.createElement('apply')
x_1 = self.dom.createElement(self.mathml_tag(e))
x.appendChild(x_1)
x.appendChild(self._print(e.base))
x.appendChild(self._print(e.exp))
return x
def _print_Number(self, e):
x = self.dom.createElement(self.mathml_tag(e))
x.appendChild(self.dom.createTextNode(str(e)))
return x
def _print_Derivative(self, e):
x = self.dom.createElement('apply')
diff_symbol = self.mathml_tag(e)
if requires_partial(e):
diff_symbol = 'partialdiff'
x.appendChild(self.dom.createElement(diff_symbol))
x_1 = self.dom.createElement('bvar')
for sym in e.variables:
x_1.appendChild(self._print(sym))
x.appendChild(x_1)
x.appendChild(self._print(e.expr))
return x
def _print_Function(self, e):
x = self.dom.createElement("apply")
x.appendChild(self.dom.createElement(self.mathml_tag(e)))
for arg in e.args:
x.appendChild(self._print(arg))
return x
def _print_Basic(self, e):
x = self.dom.createElement(self.mathml_tag(e))
for arg in e:
x.appendChild(self._print(arg))
return x
def _print_AssocOp(self, e):
x = self.dom.createElement('apply')
x_1 = self.dom.createElement(self.mathml_tag(e))
x.appendChild(x_1)
for arg in e.args:
x.appendChild(self._print(arg))
return x
def _print_Relational(self, e):
x = self.dom.createElement('apply')
x.appendChild(self.dom.createElement(self.mathml_tag(e)))
x.appendChild(self._print(e.lhs))
x.appendChild(self._print(e.rhs))
return x
def _print_list(self, seq):
"""MathML reference for the <list> element:
http://www.w3.org/TR/MathML2/chapter4.html#contm.list"""
dom_element = self.dom.createElement('list')
for item in seq:
dom_element.appendChild(self._print(item))
return dom_element
def _print_int(self, p):
dom_element = self.dom.createElement(self.mathml_tag(p))
dom_element.appendChild(self.dom.createTextNode(str(p)))
return dom_element
def apply_patch(self):
# Applying the patch of xml.dom.minidom bug
# Date: 2011-11-18
# Description: http://ronrothman.com/public/leftbraned/xml-dom-minidom-\
# toprettyxml-and-silly-whitespace/#best-solution
# Issue: http://bugs.python.org/issue4147
# Patch: http://hg.python.org/cpython/rev/7262f8f276ff/
from xml.dom.minidom import Element, Text, Node, _write_data
def writexml(self, writer, indent="", addindent="", newl=""):
# indent = current indentation
# addindent = indentation to add to higher levels
# newl = newline string
writer.write(indent + "<" + self.tagName)
attrs = self._get_attributes()
a_names = list(attrs.keys())
a_names.sort()
for a_name in a_names:
writer.write(" %s=\"" % a_name)
_write_data(writer, attrs[a_name].value)
writer.write("\"")
if self.childNodes:
writer.write(">")
if (len(self.childNodes) == 1 and
self.childNodes[0].nodeType == Node.TEXT_NODE):
self.childNodes[0].writexml(writer, '', '', '')
else:
writer.write(newl)
for node in self.childNodes:
node.writexml(
writer, indent + addindent, addindent, newl)
writer.write(indent)
writer.write("</%s>%s" % (self.tagName, newl))
else:
writer.write("/>%s" % (newl))
self._Element_writexml_old = Element.writexml
Element.writexml = writexml
def writexml(self, writer, indent="", addindent="", newl=""):
_write_data(writer, "%s%s%s" % (indent, self.data, newl))
self._Text_writexml_old = Text.writexml
Text.writexml = writexml
def restore_patch(self):
from xml.dom.minidom import Element, Text
Element.writexml = self._Element_writexml_old
Text.writexml = self._Text_writexml_old
def mathml(expr, **settings):
"""Returns the MathML representation of expr"""
return MathMLPrinter(settings).doprint(expr)
def print_mathml(expr, **settings):
"""
Prints a pretty representation of the MathML code for expr
Examples
========
>>> ##
>>> from sympy.printing.mathml import print_mathml
>>> from sympy.abc import x
>>> print_mathml(x+1) #doctest: +NORMALIZE_WHITESPACE
<apply>
<plus/>
<ci>x</ci>
<cn>1</cn>
</apply>
"""
s = MathMLPrinter(settings)
xml = s._print(sympify(expr))
s.apply_patch()
pretty_xml = xml.toprettyxml()
s.restore_patch()
print(pretty_xml)
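# Editor's addition: a minimal sketch of the string-returning entry point.
# print_mathml() is demonstrated in its doctest above; mathml() returns the
# same markup as one compact string.
if __name__ == '__main__':
    from sympy.abc import x
    print(mathml(x + 1))  # e.g. '<apply><plus/><ci>x</ci><cn>1</cn></apply>'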
| bsd-3-clause |
tartley/chronotank | pyweek12/color.py | 1 | 6169 | from __future__ import division
from collections import namedtuple
from random import randint, uniform
class Color(namedtuple('__BaseColor', 'r g b a')):
'''
4-component named tuple: (r, g, b, a), all floats from 0.0 to 1.0,
with some methods.
.. function:: __init__(r, g, b[, a=1])
``r``, ``g``, ``b``: red, green and blue
``a``: alpha (defaults to fully opaque)
For example, to specify red color::
from gloopy.color import Color
red = Color(1, 0, 0)
Or semi-transparent blue::
red = Color(0, 0, 1, 0.5)
Some predefined instances of Color provide named colors. These named colors
are defined as attributes of the Color class::
from gloopy.color import Color
print Color.RoyalPurple
The names and values are taken from the top 69 results of the xkcd color
survey:
http://blog.xkcd.com/2010/05/03/color-survey-results/
'''
COMPONENTS = 4
MAX_CHANNEL = 1
__slots__ = []
# make constructor's 'a' argument optional
def __new__(cls, r, g, b, a=MAX_CHANNEL):
return super(Color, cls).__new__(cls, r, g, b, a)
@staticmethod
def Random():
'''
Return a new random color
'''
return Color(
uniform(0, Color.MAX_CHANNEL),
uniform(0, Color.MAX_CHANNEL),
uniform(0, Color.MAX_CHANNEL),
)
def tinted(self, other, bias=0.5):
'''
Return a new color, interpolated between this color and `other` by an
amount specified by `bias`, which normally ranges from 0.0 (entirely
this color) to 1.0 (entirely `other`.)
'''
unbias = 1 - bias
return Color(
self.r * unbias + other.r * bias,
self.g * unbias + other.g * bias,
self.b * unbias + other.b * bias,
self.a * unbias + other.a * bias,
)
def variations(self, other=None):
'''
Generate an infinite sequence of colors which are tinted by random
amounts towards `other`, which defaults to a darker version of this
color.
'''
if other is None:
other = self.tinted(Color.Black, 0.5)
while True:
yield self.tinted(other, uniform(0, 1))
def inverted(self):
'''
Return a new color which is the complement of this one, i.e. if this
color contains a lot of red, the return value will contain little red,
and so on.
'''
        return Color(
            Color.MAX_CHANNEL - self.r,
            Color.MAX_CHANNEL - self.g,
            Color.MAX_CHANNEL - self.b,
            self.a
        )
Color.Blue = Color(0.012, 0.263, 0.875)
Color.Pink = Color(1.000, 0.506, 0.753)
Color.Peach = Color(1.000, 0.690, 0.486)
Color.Purple = Color(0.494, 0.118, 0.612)
Color.RoyalBlue = Color(0.020, 0.016, 0.667)
Color.LightBrown = Color(0.678, 0.506, 0.314)
Color.DarkRed = Color(0.518, 0.000, 0.000)
Color.NeonGreen = Color(0.047, 1.000, 0.047)
Color.Aquamarine = Color(0.016, 0.847, 0.698)
Color.Black = Color(0.000, 0.000, 0.000)
Color.Maroon = Color(0.396, 0.000, 0.129)
Color.Orange = Color(0.976, 0.451, 0.024)
Color.Red = Color(0.898, 0.000, 0.000)
Color.MintGreen = Color(0.561, 1.000, 0.624)
Color.PaleGreen = Color(0.780, 0.992, 0.710)
Color.Brown = Color(0.396, 0.216, 0.000)
Color.Turquoise = Color(0.024, 0.761, 0.675)
Color.Khaki = Color(0.667, 0.651, 0.384)
Color.DarkGreen = Color(0.012, 0.208, 0.000)
Color.DarkTeal = Color(0.004, 0.302, 0.306)
Color.LightPurple = Color(0.749, 0.467, 0.965)
Color.BrightBlue = Color(0.004, 0.396, 0.988)
Color.BabyBlue = Color(0.635, 0.812, 0.996)
Color.Salmon = Color(1.000, 0.475, 0.424)
Color.DarkPurple = Color(0.208, 0.024, 0.243)
Color.RoyalPurple = Color(0.294, 0.000, 0.431)
Color.BrickRed = Color(0.561, 0.078, 0.008)
Color.Rose = Color(0.812, 0.384, 0.459)
Color.Olive = Color(0.431, 0.459, 0.055)
Color.Cyan = Color(0.000, 1.000, 1.000)
Color.HotPink = Color(1.000, 0.008, 0.553)
Color.OliveGreen = Color(0.404, 0.478, 0.016)
Color.LightBlue = Color(0.584, 0.816, 0.988)
Color.Plum = Color(0.345, 0.059, 0.255)
Color.Aqua = Color(0.075, 0.918, 0.788)
Color.Grey = Color(0.576, 0.576, 0.576)
Color.YellowGreen = Color(0.753, 0.984, 0.176)
Color.LightGreen = Color(0.588, 0.976, 0.482)
Color.DarkPink = Color(0.796, 0.255, 0.420)
Color.ForestGreen = Color(0.024, 0.278, 0.047)
Color.Green = Color(0.082, 0.690, 0.102)
Color.Beige = Color(0.902, 0.855, 0.651)
Color.Teal = Color(0.008, 0.576, 0.525)
Color.PaleBlue = Color(0.816, 0.996, 0.996)
Color.Burgundy = Color(0.380, 0.000, 0.137)
Color.Tan = Color(0.820, 0.698, 0.435)
Color.Mustard = Color(0.808, 0.702, 0.004)
Color.SkyBlue = Color(0.459, 0.733, 0.992)
Color.BurntOrange = Color(0.753, 0.306, 0.004)
Color.GrassGreen = Color(0.247, 0.608, 0.043)
Color.Indigo = Color(0.220, 0.008, 0.510)
Color.Lilac = Color(0.808, 0.635, 0.992)
Color.BrightGreen = Color(0.004, 1.000, 0.027)
Color.DarkBlue = Color(0.000, 0.012, 0.357)
Color.LimeGreen = Color(0.537, 0.996, 0.020)
Color.SeaGreen = Color(0.325, 0.988, 0.631)
Color.Lavender = Color(0.780, 0.624, 0.937)
Color.Yellow = Color(1.000, 1.000, 0.078)
Color.Mauve = Color(0.682, 0.443, 0.506)
Color.NavyBlue = Color(0.000, 0.067, 0.275)
Color.LightPink = Color(1.000, 0.820, 0.875)
Color.BlueGreen = Color(0.075, 0.494, 0.427)
Color.Gold = Color(0.859, 0.706, 0.047)
Color.BrightPurple = Color(0.745, 0.012, 0.992)
Color.Violet = Color(0.604, 0.055, 0.918)
Color.Navy = Color(0.004, 0.082, 0.243)
Color.Periwinkle = Color(0.557, 0.510, 0.996)
Color.Magenta = Color(0.761, 0.000, 0.471)
Color.Lime = Color(0.667, 1.000, 0.196)
# some extras defined by me
Color.White = Color(1.000, 1.000, 1.000)
Color.LightGrey = Color(0.753, 0.753, 0.753)
Color.DarkGrey = Color(0.251, 0.251, 0.251)
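# --- Illustrative sketch (editor's addition) ---
# Demonstrates the interpolation helpers defined above; assumes nothing
# beyond this module and the standard library.
if __name__ == '__main__':
    from itertools import islice
    halfway = Color.Red.tinted(Color.Blue, bias=0.5)  # midpoint of red/blue
    print(halfway)
    # Three random variations of green, tinted toward a darker green.
    for c in islice(Color.Green.variations(), 3):
        print(c)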
| bsd-3-clause |
googleapis/google-cloud-php-debugger | synth.py | 2 | 3371 | # Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This script is used to synthesize generated parts of this library."""
import subprocess
import synthtool as s
import synthtool.gcp as gcp
import logging
logging.basicConfig(level=logging.DEBUG)
gapic = gcp.GAPICBazel()
common = gcp.CommonTemplates()
library = gapic.php_library(
service='clouddebugger',
version='v2',
bazel_target=f'//google/devtools/clouddebugger/v2:google-cloud-devtools-clouddebugger-v2-php',
)
# copy all src including partial veneer classes
s.move(library / 'src')
# copy proto files to src also
s.move(library / 'proto/src/Google/Cloud/Debugger', 'src/')
s.move(library / 'tests/')
# copy GPBMetadata file to metadata
s.move(library / 'proto/src/GPBMetadata/Google/Devtools/Clouddebugger', 'metadata/')
# document and utilize apiEndpoint instead of serviceAddress
s.replace(
"**/Gapic/*GapicClient.php",
r"'serviceAddress' =>",
r"'apiEndpoint' =>")
s.replace(
"**/Gapic/*GapicClient.php",
r"@type string \$serviceAddress\n\s+\*\s+The address",
r"""@type string $serviceAddress
* **Deprecated**. This option will be removed in a future major release. Please
* utilize the `$apiEndpoint` option instead.
* @type string $apiEndpoint
* The address""")
s.replace(
"**/Gapic/*GapicClient.php",
r"\$transportConfig, and any \$serviceAddress",
r"$transportConfig, and any `$apiEndpoint`")
# fix year
s.replace(
'**/Gapic/*GapicClient.php',
r'Copyright \d{4}',
'Copyright 2018')
s.replace(
'**/V2/Controller2Client.php',
r'Copyright \d{4}',
'Copyright 2018')
s.replace(
'**/V2/Debugger2Client.php',
r'Copyright \d{4}',
'Copyright 2018')
s.replace(
'tests/**/V2/*Test.php',
r'Copyright \d{4}',
'Copyright 2018')
### [START] protoc backwards compatibility fixes
# roll back to private properties.
s.replace(
"src/**/V*/**/*.php",
r"Generated from protobuf field ([^\n]{0,})\n\s{5}\*/\n\s{4}protected \$",
r"""Generated from protobuf field \1
*/
private $""")
# prevent proto messages from being marked final
s.replace(
"src/**/V*/**/*.php",
r"final class",
r"class")
# Replace "Unwrapped" with "Value" for method names.
s.replace(
"src/**/V*/**/*.php",
r"public function ([s|g]\w{3,})Unwrapped",
r"public function \1Value"
)
### [END] protoc backwards compatibility fixes
# fix relative cloud.google.com links
s.replace(
"src/**/V*/**/*.php",
r"(.{0,})\]\((/.{0,})\)",
r"\1](https://cloud.google.com\2)"
)
# format generated clients
subprocess.run([
'npm',
'exec',
'--yes',
'--package=@prettier/plugin-php@^0.16',
'--',
'prettier',
'**/Gapic/*',
'--write',
'--parser=php',
'--single-quote',
'--print-width=80'])
| apache-2.0 |
ns950/calibre | setup/installer/windows/site.py | 14 | 4425 | #!/usr/bin/env python2
# vim:fileencoding=UTF-8:ts=4:sw=4:sta:et:sts=4:ai
__license__ = 'GPL v3'
__copyright__ = '2009, Kovid Goyal <[email protected]>'
__docformat__ = 'restructuredtext en'
import sys
import os
import zipimport
import _memimporter
DEBUG_ZIPIMPORT = False
class ZipExtensionImporter(zipimport.zipimporter):
'''
Taken, with thanks, from the py2exe source code
'''
def __init__(self, *args, **kwargs):
zipimport.zipimporter.__init__(self, *args, **kwargs)
        # We know there are no dlls in the zip file, so don't set findproc
# (performance optimization)
#_memimporter.set_find_proc(self.locate_dll_image)
def find_module(self, fullname, path=None):
result = zipimport.zipimporter.find_module(self, fullname, path)
if result:
return result
fullname = fullname.replace(".", "\\")
if (fullname + '.pyd') in self._files:
return self
return None
def locate_dll_image(self, name):
        # A callback function for _memimporter.import_module. Tries to
# locate additional dlls. Returns the image as Python string,
# or None if not found.
if name in self._files:
return self.get_data(name)
return None
def load_module(self, fullname):
        if fullname in sys.modules:
mod = sys.modules[fullname]
if DEBUG_ZIPIMPORT:
sys.stderr.write("import %s # previously loaded from zipfile %s\n" % (fullname, self.archive))
return mod
try:
return zipimport.zipimporter.load_module(self, fullname)
except zipimport.ZipImportError:
pass
initname = "init" + fullname.split(".")[-1] # name of initfunction
filename = fullname.replace(".", "\\")
path = filename + '.pyd'
if path in self._files:
if DEBUG_ZIPIMPORT:
sys.stderr.write("# found %s in zipfile %s\n" % (path, self.archive))
code = self.get_data(path)
mod = _memimporter.import_module(code, initname, fullname, path)
mod.__file__ = "%s\\%s" % (self.archive, path)
mod.__loader__ = self
if DEBUG_ZIPIMPORT:
sys.stderr.write("import %s # loaded from zipfile %s\n" % (fullname, mod.__file__))
return mod
raise zipimport.ZipImportError, "can't find module %s" % fullname
def __repr__(self):
return "<%s object %r>" % (self.__class__.__name__, self.archive)
def abs__file__():
"""Set all module __file__ attribute to an absolute path"""
for m in sys.modules.values():
if hasattr(m, '__loader__'):
continue # don't mess with a PEP 302-supplied __file__
try:
m.__file__ = os.path.abspath(m.__file__)
except AttributeError:
continue
def aliasmbcs():
import locale, codecs
enc = locale.getdefaultlocale()[1]
if enc.startswith('cp'): # "cp***" ?
try:
codecs.lookup(enc)
except LookupError:
import encodings
encodings._cache[enc] = encodings._unknown
encodings.aliases.aliases[enc] = 'mbcs'
def add_calibre_vars():
sys.resources_location = os.path.join(sys.app_dir, 'resources')
sys.extensions_location = os.path.join(sys.app_dir, 'plugins2')
dv = os.environ.get('CALIBRE_DEVELOP_FROM', None)
if dv and os.path.exists(dv):
sys.path.insert(0, os.path.abspath(dv))
def makepath(*paths):
dir = os.path.abspath(os.path.join(*paths))
return dir, os.path.normcase(dir)
def run_entry_point():
bname, mod, func = sys.calibre_basename, sys.calibre_module, sys.calibre_function
sys.argv[0] = bname+'.exe'
pmod = __import__(mod, fromlist=[1], level=0)
return getattr(pmod, func)()
def main():
sys.frozen = 'windows_exe'
sys.setdefaultencoding('utf-8')
aliasmbcs()
sys.path_hooks.insert(0, ZipExtensionImporter)
sys.path_importer_cache.clear()
import linecache
def fake_getline(filename, lineno, module_globals=None):
return ''
linecache.orig_getline = linecache.getline
linecache.getline = fake_getline
abs__file__()
add_calibre_vars()
# Needed for pywintypes to be able to load its DLL
sys.path.append(os.path.join(sys.app_dir, 'DLLs'))
return run_entry_point()
| gpl-3.0 |
mac389/semantic-distance | Area 51/semantic-clustering.py | 2 | 2337 | print(__doc__)
import numpy as np
import Graphics as artist
from sklearn.cluster import DBSCAN
from sklearn import metrics
from sklearn.datasets.samples_generator import make_blobs
from sklearn.preprocessing import StandardScaler
##############################################################################
# Generate sample data
centers = [[1, 1], [-1, -1], [1, -1]]
X, labels_true = make_blobs(n_samples=750, centers=centers, cluster_std=0.4,
random_state=0)
X = StandardScaler().fit_transform(X)
##############################################################################
# Compute DBSCAN
db = DBSCAN(eps=0.3, min_samples=10).fit(X)
core_samples = db.core_sample_indices_
labels = db.labels_
# Number of clusters in labels, ignoring noise if present.
n_clusters_ = len(set(labels)) - (1 if -1 in labels else 0)
print('Estimated number of clusters: %d' % n_clusters_)
print("Homogeneity: %0.3f" % metrics.homogeneity_score(labels_true, labels))
print("Completeness: %0.3f" % metrics.completeness_score(labels_true, labels))
print("V-measure: %0.3f" % metrics.v_measure_score(labels_true, labels))
print("Adjusted Rand Index: %0.3f"
% metrics.adjusted_rand_score(labels_true, labels))
print("Adjusted Mutual Information: %0.3f"
% metrics.adjusted_mutual_info_score(labels_true, labels))
print("Silhouette Coefficient: %0.3f"
% metrics.silhouette_score(X, labels))
##############################################################################
# Plot result
import pylab as pl
# Black removed and is used for noise instead.
unique_labels = set(labels)
colors = pl.cm.Spectral(np.linspace(0, 1, len(unique_labels)))
for k, col in zip(unique_labels, colors):
if k == -1:
# Black used for noise.
col = 'k'
markersize = 6
class_members = [index[0] for index in np.argwhere(labels == k)]
cluster_core_samples = [index for index in core_samples
if labels[index] == k]
for index in class_members:
x = X[index]
if index in core_samples and k != -1:
markersize = 14
else:
markersize = 6
pl.plot(x[0], x[1], 'o', markerfacecolor=col,
markeredgecolor='k', markersize=markersize)
pl.title('Estimated number of clusters: %d' % n_clusters_)
pl.show()
| mit |
grueni75/GeoDiscoverer | Source/Platform/Target/Android/core/src/main/jni/freetype-2.4.2/builds/mac/ascii2mpw.py | 830 | 1033 | #!/usr/bin/env python
import sys
import string
if len( sys.argv ) == 1 :
for asc_line in sys.stdin.readlines():
mpw_line = string.replace(asc_line, "\\xA5", "\245")
mpw_line = string.replace(mpw_line, "\\xB6", "\266")
mpw_line = string.replace(mpw_line, "\\xC4", "\304")
mpw_line = string.replace(mpw_line, "\\xC5", "\305")
mpw_line = string.replace(mpw_line, "\\xFF", "\377")
mpw_line = string.replace(mpw_line, "\n", "\r")
mpw_line = string.replace(mpw_line, "\\n", "\n")
sys.stdout.write(mpw_line)
elif sys.argv[1] == "-r" :
for mpw_line in sys.stdin.readlines():
asc_line = string.replace(mpw_line, "\n", "\\n")
asc_line = string.replace(asc_line, "\r", "\n")
asc_line = string.replace(asc_line, "\245", "\\xA5")
asc_line = string.replace(asc_line, "\266", "\\xB6")
asc_line = string.replace(asc_line, "\304", "\\xC4")
asc_line = string.replace(asc_line, "\305", "\\xC5")
asc_line = string.replace(asc_line, "\377", "\\xFF")
sys.stdout.write(asc_line)
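# --- Usage sketch (editor's addition, inferred from the branches above) ---
#   python ascii2mpw.py < in.txt > out.txt     # ASCII escapes -> MPW bytes
#   python ascii2mpw.py -r < in.txt > out.txt  # MPW bytes -> ASCII escapes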
| gpl-3.0 |
CristianBB/SickRage | lib/unidecode/x01f.py | 252 | 3899 | data = (
'a', # 0x00
'a', # 0x01
'a', # 0x02
'a', # 0x03
'a', # 0x04
'a', # 0x05
'a', # 0x06
'a', # 0x07
'A', # 0x08
'A', # 0x09
'A', # 0x0a
'A', # 0x0b
'A', # 0x0c
'A', # 0x0d
'A', # 0x0e
'A', # 0x0f
'e', # 0x10
'e', # 0x11
'e', # 0x12
'e', # 0x13
'e', # 0x14
'e', # 0x15
'[?]', # 0x16
'[?]', # 0x17
'E', # 0x18
'E', # 0x19
'E', # 0x1a
'E', # 0x1b
'E', # 0x1c
'E', # 0x1d
'[?]', # 0x1e
'[?]', # 0x1f
'e', # 0x20
'e', # 0x21
'e', # 0x22
'e', # 0x23
'e', # 0x24
'e', # 0x25
'e', # 0x26
'e', # 0x27
'E', # 0x28
'E', # 0x29
'E', # 0x2a
'E', # 0x2b
'E', # 0x2c
'E', # 0x2d
'E', # 0x2e
'E', # 0x2f
'i', # 0x30
'i', # 0x31
'i', # 0x32
'i', # 0x33
'i', # 0x34
'i', # 0x35
'i', # 0x36
'i', # 0x37
'I', # 0x38
'I', # 0x39
'I', # 0x3a
'I', # 0x3b
'I', # 0x3c
'I', # 0x3d
'I', # 0x3e
'I', # 0x3f
'o', # 0x40
'o', # 0x41
'o', # 0x42
'o', # 0x43
'o', # 0x44
'o', # 0x45
'[?]', # 0x46
'[?]', # 0x47
'O', # 0x48
'O', # 0x49
'O', # 0x4a
'O', # 0x4b
'O', # 0x4c
'O', # 0x4d
'[?]', # 0x4e
'[?]', # 0x4f
'u', # 0x50
'u', # 0x51
'u', # 0x52
'u', # 0x53
'u', # 0x54
'u', # 0x55
'u', # 0x56
'u', # 0x57
'[?]', # 0x58
'U', # 0x59
'[?]', # 0x5a
'U', # 0x5b
'[?]', # 0x5c
'U', # 0x5d
'[?]', # 0x5e
'U', # 0x5f
'o', # 0x60
'o', # 0x61
'o', # 0x62
'o', # 0x63
'o', # 0x64
'o', # 0x65
'o', # 0x66
'o', # 0x67
'O', # 0x68
'O', # 0x69
'O', # 0x6a
'O', # 0x6b
'O', # 0x6c
'O', # 0x6d
'O', # 0x6e
'O', # 0x6f
'a', # 0x70
'a', # 0x71
'e', # 0x72
'e', # 0x73
'e', # 0x74
'e', # 0x75
'i', # 0x76
'i', # 0x77
'o', # 0x78
'o', # 0x79
'u', # 0x7a
'u', # 0x7b
'o', # 0x7c
'o', # 0x7d
'[?]', # 0x7e
'[?]', # 0x7f
'a', # 0x80
'a', # 0x81
'a', # 0x82
'a', # 0x83
'a', # 0x84
'a', # 0x85
'a', # 0x86
'a', # 0x87
'A', # 0x88
'A', # 0x89
'A', # 0x8a
'A', # 0x8b
'A', # 0x8c
'A', # 0x8d
'A', # 0x8e
'A', # 0x8f
'e', # 0x90
'e', # 0x91
'e', # 0x92
'e', # 0x93
'e', # 0x94
'e', # 0x95
'e', # 0x96
'e', # 0x97
'E', # 0x98
'E', # 0x99
'E', # 0x9a
'E', # 0x9b
'E', # 0x9c
'E', # 0x9d
'E', # 0x9e
'E', # 0x9f
'o', # 0xa0
'o', # 0xa1
'o', # 0xa2
'o', # 0xa3
'o', # 0xa4
'o', # 0xa5
'o', # 0xa6
'o', # 0xa7
'O', # 0xa8
'O', # 0xa9
'O', # 0xaa
'O', # 0xab
'O', # 0xac
'O', # 0xad
'O', # 0xae
'O', # 0xaf
'a', # 0xb0
'a', # 0xb1
'a', # 0xb2
'a', # 0xb3
'a', # 0xb4
'[?]', # 0xb5
'a', # 0xb6
'a', # 0xb7
'A', # 0xb8
'A', # 0xb9
'A', # 0xba
'A', # 0xbb
'A', # 0xbc
'\'', # 0xbd
'i', # 0xbe
'\'', # 0xbf
'~', # 0xc0
'"~', # 0xc1
'e', # 0xc2
'e', # 0xc3
'e', # 0xc4
'[?]', # 0xc5
'e', # 0xc6
'e', # 0xc7
'E', # 0xc8
'E', # 0xc9
'E', # 0xca
'E', # 0xcb
'E', # 0xcc
'\'`', # 0xcd
'\'\'', # 0xce
'\'~', # 0xcf
'i', # 0xd0
'i', # 0xd1
'i', # 0xd2
'i', # 0xd3
'[?]', # 0xd4
'[?]', # 0xd5
'i', # 0xd6
'i', # 0xd7
'I', # 0xd8
'I', # 0xd9
'I', # 0xda
'I', # 0xdb
'[?]', # 0xdc
'`\'', # 0xdd
'`\'', # 0xde
'`~', # 0xdf
'u', # 0xe0
'u', # 0xe1
'u', # 0xe2
'u', # 0xe3
'R', # 0xe4
'R', # 0xe5
'u', # 0xe6
'u', # 0xe7
'U', # 0xe8
'U', # 0xe9
'U', # 0xea
'U', # 0xeb
'R', # 0xec
'"`', # 0xed
'"\'', # 0xee
'`', # 0xef
'[?]', # 0xf0
'[?]', # 0xf1
'o', # 0xf2
'o', # 0xf3
'o', # 0xf4
'[?]', # 0xf5
'o', # 0xf6
'o', # 0xf7
'O', # 0xf8
'O', # 0xf9
'O', # 0xfa
'O', # 0xfb
'O', # 0xfc
'\'', # 0xfd
'`', # 0xfe
)
| gpl-3.0 |
mickojavanese/support-tools | wiki_to_md/wiki2gfm.py | 25 | 4695 | # Copyright 2014 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tool to convert Google Code Wiki files to GitHub-flavored Markdown.
Reference for Google Code Wiki:
https://code.google.com/p/support/wiki/WikiSyntax
Reference for Github-flavored Markdown:
https://help.github.com/articles/github-flavored-markdown
The conversion process is not always directly possible; for example,
wiki pragma statements have no direct equivalent for GFM. In cases
where no direct conversion can be made, or the input may have unexpected
output, a warning will be issued.
"""
import argparse
import codecs
import os
import sys
from impl import converter as converter_mod
from impl import formatting_handler as formatting_handler_mod
from impl import pragma_handler as pragma_handler_mod
def PrintWarning(input_line, message):
"""Print a warning.
When a conversion cannot be done or may be unreliable/inexact,
a warning will be printed to stdout notifying the user of this.
Args:
input_line: The line number this warning occurred on.
message: The warning message.
"""
print "Warning (line {0} of input file):\n{1}\n".format(input_line, message)
def main(args):
"""The main function.
Args:
args: The command line arguments.
"""
parser = argparse.ArgumentParser(
description="Converts a Google Code wiki page to GitHub-flavored "
"Markdown.")
parser.add_argument("--input_file", required=True,
help="The input Google Code Wiki file")
parser.add_argument("--output_file", required=True,
help="The output GitHub-flavored Markdown file")
parser.add_argument("--project", required=False,
help="The name of the project for the Wiki")
parser.add_argument("--wikipages_list", nargs="*",
help="The list of wiki pages that are assumed to exist "
"for the purpose of auto-linking to other pages")
parser.add_argument("--wikipages_path", nargs="*",
help="The list of paths containing wiki pages that are "
"assumed to exist for the purpose of auto-linking to "
"other pages")
symmetric_headers_help = ("Controls if the output of header level "
"indicators are made symmetric. E.g. '### Header' "
"if disabled, and '### Header ###' if enabled")
parser.add_argument("--symmetric_headers", dest="symmetric_headers",
action="store_true", help=symmetric_headers_help)
parser.add_argument("--no_symmetric_headers", dest="symmetric_headers",
action="store_false", help=symmetric_headers_help)
  parser.set_defaults(symmetric_headers=False)
parsed_args, unused_unknown_args = parser.parse_known_args(args)
with codecs.open(parsed_args.input_file, "rU", "utf-8") as input_stream:
with codecs.open(parsed_args.output_file, "wU", "utf-8") as output_stream:
# Create the master list of wiki pages assumed to exist.
wikipages = parsed_args.wikipages_list or []
wikipages.append(parsed_args.input_file)
if parsed_args.wikipages_path:
# Add all the .wiki files in all the given paths.
for path in parsed_args.wikipages_path:
for f in os.listdir(path):
if f.endswith(".wiki"):
wikipages.append(f[:-len(".wiki")])
      # Fill this with a mapping from Google Code issue
# to GitHub issue to automate that conversion.
issue_map = {}
# Prepare the handlers and converter.
pragma_handler = pragma_handler_mod.PragmaHandler(PrintWarning)
formatting_handler = formatting_handler_mod.FormattingHandler(
PrintWarning,
parsed_args.project,
issue_map,
parsed_args.symmetric_headers)
converter = converter_mod.Converter(
pragma_handler,
formatting_handler,
PrintWarning,
parsed_args.project,
wikipages)
# And perform the conversion.
converter.Convert(input_stream, output_stream)
if __name__ == "__main__":
main(sys.argv)
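# --- Usage sketch (editor's addition, per the argparse flags above) ---
#   python wiki2gfm.py --input_file Page.wiki --output_file Page.md \
#       --project myproject --wikipages_path ./wiki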
| apache-2.0 |
TaskEvolution/Task-Coach-Evolution | taskcoach/taskcoachlib/thirdparty/tests/test_parser2.py | 451 | 2119 | from __future__ import absolute_import, division, unicode_literals
import io
from . import support # flake8: noqa
from html5lib import html5parser
from html5lib.constants import namespaces
from html5lib import treebuilders
import unittest
# tests that aren't autogenerated from text files
class MoreParserTests(unittest.TestCase):
def setUp(self):
self.dom_tree = treebuilders.getTreeBuilder("dom")
def test_assertDoctypeCloneable(self):
parser = html5parser.HTMLParser(tree=self.dom_tree)
doc = parser.parse('<!DOCTYPE HTML>')
self.assertTrue(doc.cloneNode(True))
def test_line_counter(self):
# http://groups.google.com/group/html5lib-discuss/browse_frm/thread/f4f00e4a2f26d5c0
parser = html5parser.HTMLParser(tree=self.dom_tree)
parser.parse("<pre>\nx\n>\n</pre>")
def test_namespace_html_elements_0_dom(self):
parser = html5parser.HTMLParser(tree=self.dom_tree, namespaceHTMLElements=True)
doc = parser.parse("<html></html>")
self.assertTrue(doc.childNodes[0].namespaceURI == namespaces["html"])
def test_namespace_html_elements_1_dom(self):
parser = html5parser.HTMLParser(tree=self.dom_tree, namespaceHTMLElements=False)
doc = parser.parse("<html></html>")
self.assertTrue(doc.childNodes[0].namespaceURI is None)
def test_namespace_html_elements_0_etree(self):
parser = html5parser.HTMLParser(namespaceHTMLElements=True)
doc = parser.parse("<html></html>")
self.assertTrue(list(doc)[0].tag == "{%s}html" % (namespaces["html"],))
def test_namespace_html_elements_1_etree(self):
parser = html5parser.HTMLParser(namespaceHTMLElements=False)
doc = parser.parse("<html></html>")
self.assertTrue(list(doc)[0].tag == "html")
def test_unicode_file(self):
parser = html5parser.HTMLParser()
parser.parse(io.StringIO("a"))
def buildTestSuite():
return unittest.defaultTestLoader.loadTestsFromName(__name__)
def main():
buildTestSuite()
unittest.main()
if __name__ == '__main__':
main()
| gpl-3.0 |
thomassa/xen-api | ocaml/xapi/tests/looper2.py | 38 | 1410 | #!/usr/bin/python
print "Program attempts to log into an XAPI server to fetch a list of VMs and"
print "a list of debug objects. It then chooses the first debug object, "
print "queries the int->float map and then calls the 'recycle' message using"
print "that map as an argument"
print
import getopt, sys, xapi
url = "http://localhost:8086" #default
parsed = getopt.getopt(sys.argv[1:], "u:url")
if len(parsed[0]) == 1:
url = parsed[0][0][1]
print "Connecting to server on URL: ", url
print "(change with -u argument)"
# Create an object to represent our server.
server = xapi.Server(url);
# Call the server and get our result.
print "Logging in... ",
session = server.Session.login_with_password("user", "passwd")
print "OK"
print "Session ID: \""+session+"\""
vm_list = server.VM.get_all(session)
print "VM list = " + repr(vm_list)
for vm in vm_list:
print "VM ", vm, " in state: ", server.VM.get_power_state(session, vm)
first_vm = vm_list[0]
debug_objs = server.Debug.get_all(session)
debug = debug_objs[0]
ifm = server.Debug.get_int_float_map(session, debug)
print "Got an int->float map: " + repr(ifm)
print "doing the int_float_map recycle thing"
attempt = 0
while 1:
this = server.Debug.recycle_int_float_map(ifm)
    if ifm != this:
        print "Got a different response!"
        print "this = ", repr(this)
        print "ifm = ", repr(ifm)
        raise Exception("Failed")
attempt = attempt + 1
print attempt
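# --- Usage sketch (editor's addition, per the getopt handling above) ---
#   python looper2.py -u http://localhost:8086
# Loops forever; each iteration round-trips the int->float map through the
# server and prints the incrementing attempt counter.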
| lgpl-2.1 |
jiangxb1987/spark | examples/src/main/python/ml/interaction_example.py | 24 | 1907 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import print_function
# $example on$
from pyspark.ml.feature import Interaction, VectorAssembler
# $example off$
from pyspark.sql import SparkSession
if __name__ == "__main__":
spark = SparkSession\
.builder\
.appName("InteractionExample")\
.getOrCreate()
# $example on$
df = spark.createDataFrame(
[(1, 1, 2, 3, 8, 4, 5),
(2, 4, 3, 8, 7, 9, 8),
(3, 6, 1, 9, 2, 3, 6),
(4, 10, 8, 6, 9, 4, 5),
(5, 9, 2, 7, 10, 7, 3),
(6, 1, 1, 4, 2, 8, 4)],
["id1", "id2", "id3", "id4", "id5", "id6", "id7"])
assembler1 = VectorAssembler(inputCols=["id2", "id3", "id4"], outputCol="vec1")
assembled1 = assembler1.transform(df)
assembler2 = VectorAssembler(inputCols=["id5", "id6", "id7"], outputCol="vec2")
assembled2 = assembler2.transform(assembled1).select("id1", "vec1", "vec2")
interaction = Interaction(inputCols=["id1", "vec1", "vec2"], outputCol="interactedCol")
interacted = interaction.transform(assembled2)
interacted.show(truncate=False)
# $example off$
spark.stop()
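# --- Usage sketch (editor's addition) ---
# Like the other examples in this directory, this script is normally run
# through spark-submit:
#   bin/spark-submit examples/src/main/python/ml/interaction_example.py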
| apache-2.0 |
etashjian/ECE757-final | src/arch/x86/isa/insts/system/control_registers.py | 91 | 3073 | # Copyright (c) 2009 The Regents of The University of Michigan
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Gabe Black
microcode = '''
def macroop CLTS {
rdcr t1, regIdx(0), dataSize=8
andi t1, t1, 0xF7, dataSize=1
wrcr regIdx(0), t1, dataSize=8
};
def macroop LMSW_R {
rdcr t1, regIdx(0), dataSize=8
# This logic sets MP, EM, and TS to whatever is in the operand. It will
# set PE but not clear it.
limm t2, "~ULL(0xe)", dataSize=8
and t1, t1, t2, dataSize=8
andi t2, reg, 0xf, dataSize=8
or t1, t1, t2, dataSize=8
wrcr regIdx(0), t1, dataSize=8
};
def macroop LMSW_M {
ld t3, seg, sib, disp, dataSize=2
rdcr t1, regIdx(0), dataSize=8
# This logic sets MP, EM, and TS to whatever is in the operand. It will
# set PE but not clear it.
limm t2, "~ULL(0xe)", dataSize=8
and t1, t1, t2, dataSize=8
andi t2, t3, 0xf, dataSize=8
or t1, t1, t2, dataSize=8
wrcr regIdx(0), t1, dataSize=8
};
def macroop LMSW_P {
rdip t7, dataSize=asz
ld t3, seg, riprel, disp, dataSize=2
rdcr t1, regIdx(0), dataSize=8
# This logic sets MP, EM, and TS to whatever is in the operand. It will
# set PE but not clear it.
limm t2, "~ULL(0xe)", dataSize=8
and t1, t1, t2, dataSize=8
andi t2, t3, 0xf, dataSize=8
or t1, t1, t2, dataSize=8
wrcr regIdx(0), t1, dataSize=8
};
def macroop SMSW_R {
rdcr reg, regIdx(0)
};
def macroop SMSW_M {
rdcr t1, regIdx(0)
st t1, seg, sib, disp, dataSize=2
};
def macroop SMSW_P {
rdcr t1, regIdx(0)
rdip t7, dataSize=asz
st t1, seg, riprel, disp, dataSize=2
};
'''
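# --- Illustrative sketch (editor's addition, not part of gem5) ---
# Plain-Python model of the LMSW masking above: bits 1-3 (MP/EM/TS) of CR0
# are replaced by the operand's, while PE (bit 0) can be set but never
# cleared, because the old PE bit survives the ~0xe mask and OR can only
# set bits.
if __name__ == '__main__':
    old_cr0, operand = 0x11, 0x6
    new_cr0 = (old_cr0 & ~0xe) | (operand & 0xf)
    assert new_cr0 == 0x17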
| bsd-3-clause |
skidzo/sympy | sympy/strategies/rl.py | 94 | 4296 | """ Generic Rules for SymPy
This file assumes knowledge of Basic and little else.
"""
from __future__ import print_function, division
from sympy.utilities.iterables import sift
from .util import new
# Functions that create rules
def rm_id(isid, new=new):
""" Create a rule to remove identities
isid - fn :: x -> Bool --- whether or not this element is an identity
>>> from sympy.strategies import rm_id
>>> from sympy import Basic
>>> remove_zeros = rm_id(lambda x: x==0)
>>> remove_zeros(Basic(1, 0, 2))
Basic(1, 2)
    >>> remove_zeros(Basic(0, 0)) # If only identities then we keep one
Basic(0)
See Also:
unpack
"""
def ident_remove(expr):
""" Remove identities """
ids = list(map(isid, expr.args))
if sum(ids) == 0: # No identities. Common case
return expr
elif sum(ids) != len(ids): # there is at least one non-identity
return new(expr.__class__,
*[arg for arg, x in zip(expr.args, ids) if not x])
else:
return new(expr.__class__, expr.args[0])
return ident_remove
def glom(key, count, combine):
""" Create a rule to conglomerate identical args
>>> from sympy.strategies import glom
>>> from sympy import Add
>>> from sympy.abc import x
>>> key = lambda x: x.as_coeff_Mul()[1]
>>> count = lambda x: x.as_coeff_Mul()[0]
>>> combine = lambda cnt, arg: cnt * arg
>>> rl = glom(key, count, combine)
>>> rl(Add(x, -x, 3*x, 2, 3, evaluate=False))
3*x + 5
Wait, how are key, count and combine supposed to work?
>>> key(2*x)
x
>>> count(2*x)
2
>>> combine(2, x)
2*x
"""
def conglomerate(expr):
""" Conglomerate together identical args x + x -> 2x """
groups = sift(expr.args, key)
counts = dict((k, sum(map(count, args))) for k, args in groups.items())
newargs = [combine(cnt, mat) for mat, cnt in counts.items()]
if set(newargs) != set(expr.args):
return new(type(expr), *newargs)
else:
return expr
return conglomerate
def sort(key, new=new):
""" Create a rule to sort by a key function
>>> from sympy.strategies import sort
>>> from sympy import Basic
>>> sort_rl = sort(str)
>>> sort_rl(Basic(3, 1, 2))
Basic(1, 2, 3)
"""
def sort_rl(expr):
return new(expr.__class__, *sorted(expr.args, key=key))
return sort_rl
def distribute(A, B):
""" Turns an A containing Bs into a B of As
where A, B are container types
>>> from sympy.strategies import distribute
>>> from sympy import Add, Mul, symbols
>>> x, y = symbols('x,y')
>>> dist = distribute(Mul, Add)
>>> expr = Mul(2, x+y, evaluate=False)
>>> expr
2*(x + y)
>>> dist(expr)
2*x + 2*y
"""
def distribute_rl(expr):
for i, arg in enumerate(expr.args):
if isinstance(arg, B):
first, b, tail = expr.args[:i], expr.args[i], expr.args[i+1:]
return B(*[A(*(first + (arg,) + tail)) for arg in b.args])
return expr
return distribute_rl
def subs(a, b):
""" Replace expressions exactly """
def subs_rl(expr):
if expr == a:
return b
else:
return expr
return subs_rl
# Functions that are rules
def unpack(expr):
""" Rule to unpack singleton args
>>> from sympy.strategies import unpack
>>> from sympy import Basic
>>> unpack(Basic(2))
2
"""
if len(expr.args) == 1:
return expr.args[0]
else:
return expr
def flatten(expr, new=new):
""" Flatten T(a, b, T(c, d), T2(e)) to T(a, b, c, d, T2(e)) """
cls = expr.__class__
args = []
for arg in expr.args:
if arg.__class__ == cls:
args.extend(arg.args)
else:
args.append(arg)
return new(expr.__class__, *args)
def rebuild(expr):
""" Rebuild a SymPy tree
This function recursively calls constructors in the expression tree.
This forces canonicalization and removes ugliness introduced by the use of
Basic.__new__
"""
try:
return type(expr)(*list(map(rebuild, expr.args)))
except Exception:
return expr
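# Editor's addition: a minimal sketch composing the rules defined above.
# Assumes only sympy's Basic, exactly as the doctests do.
if __name__ == '__main__':
    from sympy import Basic
    strip_zeros = rm_id(lambda x: x == 0)
    print(unpack(strip_zeros(Basic(0, 3, 0))))  # rm_id -> Basic(3); unpack -> 3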
| bsd-3-clause |
Lmaths/linux-stable-rcn-ee | tools/perf/scripts/python/Perf-Trace-Util/lib/Perf/Trace/Core.py | 1891 | 3300 | # Core.py - Python extension for perf script, core functions
#
# Copyright (C) 2010 by Tom Zanussi <[email protected]>
#
# This software may be distributed under the terms of the GNU General
# Public License ("GPL") version 2 as published by the Free Software
# Foundation.
from collections import defaultdict
def autodict():
return defaultdict(autodict)
flag_fields = autodict()
symbolic_fields = autodict()
def define_flag_field(event_name, field_name, delim):
flag_fields[event_name][field_name]['delim'] = delim
def define_flag_value(event_name, field_name, value, field_str):
flag_fields[event_name][field_name]['values'][value] = field_str
def define_symbolic_field(event_name, field_name):
# nothing to do, really
pass
def define_symbolic_value(event_name, field_name, value, field_str):
symbolic_fields[event_name][field_name]['values'][value] = field_str
def flag_str(event_name, field_name, value):
string = ""
if flag_fields[event_name][field_name]:
print_delim = 0
keys = flag_fields[event_name][field_name]['values'].keys()
keys.sort()
for idx in keys:
if not value and not idx:
string += flag_fields[event_name][field_name]['values'][idx]
break
if idx and (value & idx) == idx:
if print_delim and flag_fields[event_name][field_name]['delim']:
string += " " + flag_fields[event_name][field_name]['delim'] + " "
string += flag_fields[event_name][field_name]['values'][idx]
print_delim = 1
value &= ~idx
return string
def symbol_str(event_name, field_name, value):
string = ""
if symbolic_fields[event_name][field_name]:
keys = symbolic_fields[event_name][field_name]['values'].keys()
keys.sort()
for idx in keys:
if not value and not idx:
string = symbolic_fields[event_name][field_name]['values'][idx]
break
if (value == idx):
string = symbolic_fields[event_name][field_name]['values'][idx]
break
return string
trace_flags = { 0x00: "NONE", \
0x01: "IRQS_OFF", \
0x02: "IRQS_NOSUPPORT", \
0x04: "NEED_RESCHED", \
0x08: "HARDIRQ", \
0x10: "SOFTIRQ" }
def trace_flag_str(value):
string = ""
print_delim = 0
keys = trace_flags.keys()
for idx in keys:
if not value and not idx:
string += "NONE"
break
if idx and (value & idx) == idx:
if print_delim:
string += " | ";
string += trace_flags[idx]
print_delim = 1
value &= ~idx
return string
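# For instance, trace_flag_str(0x05) combines the 0x01 and 0x04 bits into
# "IRQS_OFF | NEED_RESCHED".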
def taskState(state):
states = {
0 : "R",
1 : "S",
2 : "D",
64: "DEAD"
}
if state not in states:
return "Unknown"
return states[state]
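# e.g. taskState(1) yields "S"; any unrecognised state falls back to "Unknown".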
class EventHeaders:
def __init__(self, common_cpu, common_secs, common_nsecs,
common_pid, common_comm, common_callchain):
self.cpu = common_cpu
self.secs = common_secs
self.nsecs = common_nsecs
self.pid = common_pid
self.comm = common_comm
self.callchain = common_callchain
def ts(self):
return (self.secs * (10 ** 9)) + self.nsecs
def ts_format(self):
return "%d.%d" % (self.secs, int(self.nsecs / 1000))
| gpl-2.0 |
niamoto/niamoto-core | niamoto/utils.py | 2 | 2339 | # coding: utf-8
import re
from sphinx.util import docstrings
# --------------------------------------------------------------- #
# The Following code is taken from #
# https://github.com/openstack/rally/blob/master/rally/common/ #
# plugin/info.py#L31-L78 #
# Please refer to the link for licence information. #
# --------------------------------------------------------------- #
PARAM_OR_RETURNS_REGEX = re.compile(":(?:param|returns)")
RETURNS_REGEX = re.compile(":returns: (?P<doc>.*)", re.S)
PARAM_REGEX = re.compile(r":param (?P<name>[\*\w]+): (?P<doc>.*?)"
                         r"(?:(?=:param)|(?=:return)|(?=:raises)|\Z)", re.S)
def reindent(string):
return "\n".join(l.strip() for l in string.strip().split("\n"))
def parse_docstring(docstring):
"""Parse the docstring into its components.
:returns: a dictionary of form
{
"short_description": ...,
"long_description": ...,
"params": {"name": doc, },
"returns": ...
}
"""
short_description = long_description = returns = ""
params = {}
if docstring:
docstring = "\n".join(docstrings.prepare_docstring(docstring))
lines = docstring.split("\n", 1)
short_description = lines[0]
if len(lines) > 1:
long_description = lines[1].strip()
params_returns_desc = None
match = PARAM_OR_RETURNS_REGEX.search(long_description)
if match:
long_desc_end = match.start()
params_returns_desc = long_description[long_desc_end:].strip()
long_description = long_description[:long_desc_end].rstrip()
if params_returns_desc:
params = {
name: "\n".join(docstrings.prepare_docstring(doc))
for name, doc in PARAM_REGEX.findall(params_returns_desc)
}
match = RETURNS_REGEX.search(params_returns_desc)
if match:
returns = reindent(match.group("doc"))
return {
"short_description": short_description,
"long_description": long_description,
"params": params,
"returns": returns
}
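# Rough usage sketch: for a docstring of the form
#     """Summary line.
#     Longer description.
#     :param a: first operand
#     :returns: the sum
#     """
# parse_docstring yields "Summary line." as short_description, the longer text
# as long_description, {"a": "first operand"} as params and "the sum" as
# returns (modulo whitespace normalisation done by prepare_docstring).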
| gpl-3.0 |
altairpearl/scikit-learn | examples/ensemble/plot_voting_decision_regions.py | 86 | 2386 | """
==================================================
Plot the decision boundaries of a VotingClassifier
==================================================
Plot the decision boundaries of a `VotingClassifier` for
two features of the Iris dataset.
Plot the class probabilities of the first sample in a toy dataset
predicted by three different classifiers and averaged by the
`VotingClassifier`.
First, three exemplary classifiers are initialized (`DecisionTreeClassifier`,
`KNeighborsClassifier`, and `SVC`) and used to initialize a
soft-voting `VotingClassifier` with weights `[2, 1, 2]`, which means that
the predicted probabilities of the `DecisionTreeClassifier` and `SVC`
count twice as much as those of the `KNeighborsClassifier` classifier
when the averaged probability is calculated.
"""
print(__doc__)
from itertools import product
import numpy as np
import matplotlib.pyplot as plt
from sklearn import datasets
from sklearn.tree import DecisionTreeClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.svm import SVC
from sklearn.ensemble import VotingClassifier
# Loading some example data
iris = datasets.load_iris()
X = iris.data[:, [0, 2]]
y = iris.target
# Training classifiers
clf1 = DecisionTreeClassifier(max_depth=4)
clf2 = KNeighborsClassifier(n_neighbors=7)
clf3 = SVC(kernel='rbf', probability=True)
eclf = VotingClassifier(estimators=[('dt', clf1), ('knn', clf2),
('svc', clf3)],
voting='soft', weights=[2, 1, 2])
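# Under soft voting the class probabilities are averaged with these weights,
# i.e. (2*p_tree + 1*p_knn + 2*p_svc) / 5, and the class with the largest
# average wins.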
clf1.fit(X, y)
clf2.fit(X, y)
clf3.fit(X, y)
eclf.fit(X, y)
# Plotting decision regions
x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, 0.1),
np.arange(y_min, y_max, 0.1))
f, axarr = plt.subplots(2, 2, sharex='col', sharey='row', figsize=(10, 8))
for idx, clf, tt in zip(product([0, 1], [0, 1]),
[clf1, clf2, clf3, eclf],
['Decision Tree (depth=4)', 'KNN (k=7)',
'Kernel SVM', 'Soft Voting']):
Z = clf.predict(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)
axarr[idx[0], idx[1]].contourf(xx, yy, Z, alpha=0.4)
axarr[idx[0], idx[1]].scatter(X[:, 0], X[:, 1], c=y, alpha=0.8)
axarr[idx[0], idx[1]].set_title(tt)
plt.show()
| bsd-3-clause |
AlexanderDolgan/sputnik | wp-content/themes/node_modules/gulp-sass/node_modules/node-sass/node_modules/node-gyp/gyp/buildbot/buildbot_run.py | 270 | 8338 | #!/usr/bin/env python
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Argument-less script to select what to run on the buildbots."""
import filecmp
import os
import shutil
import subprocess
import sys
if sys.platform in ['win32', 'cygwin']:
EXE_SUFFIX = '.exe'
else:
EXE_SUFFIX = ''
BUILDBOT_DIR = os.path.dirname(os.path.abspath(__file__))
TRUNK_DIR = os.path.dirname(BUILDBOT_DIR)
ROOT_DIR = os.path.dirname(TRUNK_DIR)
ANDROID_DIR = os.path.join(ROOT_DIR, 'android')
CMAKE_DIR = os.path.join(ROOT_DIR, 'cmake')
CMAKE_BIN_DIR = os.path.join(CMAKE_DIR, 'bin')
OUT_DIR = os.path.join(TRUNK_DIR, 'out')
def CallSubProcess(*args, **kwargs):
"""Wrapper around subprocess.call which treats errors as build exceptions."""
with open(os.devnull) as devnull_fd:
retcode = subprocess.call(stdin=devnull_fd, *args, **kwargs)
if retcode != 0:
print '@@@STEP_EXCEPTION@@@'
sys.exit(1)
def PrepareCmake():
"""Build CMake 2.8.8 since the version in Precise is 2.8.7."""
if os.environ['BUILDBOT_CLOBBER'] == '1':
print '@@@BUILD_STEP Clobber CMake checkout@@@'
shutil.rmtree(CMAKE_DIR)
# We always build CMake 2.8.8, so no need to do anything
# if the directory already exists.
if os.path.isdir(CMAKE_DIR):
return
print '@@@BUILD_STEP Initialize CMake checkout@@@'
os.mkdir(CMAKE_DIR)
print '@@@BUILD_STEP Sync CMake@@@'
CallSubProcess(
['git', 'clone',
'--depth', '1',
'--single-branch',
'--branch', 'v2.8.8',
'--',
'git://cmake.org/cmake.git',
CMAKE_DIR],
cwd=CMAKE_DIR)
print '@@@BUILD_STEP Build CMake@@@'
CallSubProcess(
['/bin/bash', 'bootstrap', '--prefix=%s' % CMAKE_DIR],
cwd=CMAKE_DIR)
CallSubProcess( ['make', 'cmake'], cwd=CMAKE_DIR)
_ANDROID_SETUP = 'source build/envsetup.sh && lunch full-eng'
def PrepareAndroidTree():
"""Prepare an Android tree to run 'android' format tests."""
if os.environ['BUILDBOT_CLOBBER'] == '1':
print '@@@BUILD_STEP Clobber Android checkout@@@'
shutil.rmtree(ANDROID_DIR)
# (Re)create the directory so that the following steps will succeed.
if not os.path.isdir(ANDROID_DIR):
os.mkdir(ANDROID_DIR)
# We use a manifest from the gyp project listing pinned revisions of AOSP to
# use, to ensure that we test against a stable target. This needs to be
# updated to pick up new build system changes sometimes, so we must test if
# it has changed.
manifest_filename = 'aosp_manifest.xml'
gyp_manifest = os.path.join(BUILDBOT_DIR, manifest_filename)
android_manifest = os.path.join(ANDROID_DIR, '.repo', 'manifests',
manifest_filename)
manifest_is_current = (os.path.isfile(android_manifest) and
filecmp.cmp(gyp_manifest, android_manifest))
if not manifest_is_current:
# It's safe to repeat these steps, so just do them again to make sure we are
# in a good state.
print '@@@BUILD_STEP Initialize Android checkout@@@'
CallSubProcess(
['repo', 'init',
'-u', 'https://android.googlesource.com/platform/manifest',
'-b', 'master',
'-g', 'all,-notdefault,-device,-darwin,-mips,-x86'],
cwd=ANDROID_DIR)
shutil.copy(gyp_manifest, android_manifest)
print '@@@BUILD_STEP Sync Android@@@'
CallSubProcess(['repo', 'sync', '-j4', '-m', manifest_filename],
cwd=ANDROID_DIR)
# If we already built the system image successfully and didn't sync to a new
# version of the source, skip running the build again as it's expensive even
# when there's nothing to do.
system_img = os.path.join(ANDROID_DIR, 'out', 'target', 'product', 'generic',
'system.img')
if manifest_is_current and os.path.isfile(system_img):
return
print '@@@BUILD_STEP Build Android@@@'
CallSubProcess(
['/bin/bash',
'-c', '%s && make -j4' % _ANDROID_SETUP],
cwd=ANDROID_DIR)
def StartAndroidEmulator():
"""Start an android emulator from the built android tree."""
print '@@@BUILD_STEP Start Android emulator@@@'
CallSubProcess(['/bin/bash', '-c',
'%s && adb kill-server ' % _ANDROID_SETUP],
cwd=ANDROID_DIR)
# If taskset is available, use it to force adbd to run only on one core, as,
# sadly, it improves its reliability (see crbug.com/268450).
adbd_wrapper = ''
with open(os.devnull, 'w') as devnull_fd:
if subprocess.call(['which', 'taskset'], stdout=devnull_fd) == 0:
adbd_wrapper = 'taskset -c 0'
CallSubProcess(['/bin/bash', '-c',
'%s && %s adb start-server ' % (_ANDROID_SETUP, adbd_wrapper)],
cwd=ANDROID_DIR)
subprocess.Popen(
['/bin/bash', '-c',
'%s && emulator -no-window' % _ANDROID_SETUP],
cwd=ANDROID_DIR)
CallSubProcess(
['/bin/bash', '-c',
'%s && adb wait-for-device' % _ANDROID_SETUP],
cwd=ANDROID_DIR)
def StopAndroidEmulator():
"""Stop all android emulators."""
print '@@@BUILD_STEP Stop Android emulator@@@'
# If this fails, it's because there is no emulator running.
subprocess.call(['pkill', 'emulator.*'])
def GypTestFormat(title, format=None, msvs_version=None, tests=[]):
"""Run the gyp tests for a given format, emitting annotator tags.
See annotator docs at:
https://sites.google.com/a/chromium.org/dev/developers/testing/chromium-build-infrastructure/buildbot-annotations
Args:
format: gyp format to test.
Returns:
    0 for success, 1 for failure.
"""
if not format:
format = title
print '@@@BUILD_STEP ' + title + '@@@'
sys.stdout.flush()
env = os.environ.copy()
if msvs_version:
env['GYP_MSVS_VERSION'] = msvs_version
command = ' '.join(
[sys.executable, 'gyp/gyptest.py',
'--all',
'--passed',
'--format', format,
'--path', CMAKE_BIN_DIR,
'--chdir', 'gyp'] + tests)
if format == 'android':
# gyptest needs the environment setup from envsetup/lunch in order to build
# using the 'android' backend, so this is done in a single shell.
retcode = subprocess.call(
['/bin/bash',
'-c', '%s && cd %s && %s' % (_ANDROID_SETUP, ROOT_DIR, command)],
cwd=ANDROID_DIR, env=env)
else:
retcode = subprocess.call(command, cwd=ROOT_DIR, env=env, shell=True)
if retcode:
# Emit failure tag, and keep going.
print '@@@STEP_FAILURE@@@'
return 1
return 0
def GypBuild():
# Dump out/ directory.
print '@@@BUILD_STEP cleanup@@@'
print 'Removing %s...' % OUT_DIR
shutil.rmtree(OUT_DIR, ignore_errors=True)
print 'Done.'
retcode = 0
# The Android gyp bot runs on linux so this must be tested first.
if os.environ['BUILDBOT_BUILDERNAME'] == 'gyp-android':
PrepareAndroidTree()
StartAndroidEmulator()
try:
retcode += GypTestFormat('android')
finally:
StopAndroidEmulator()
elif sys.platform.startswith('linux'):
retcode += GypTestFormat('ninja')
retcode += GypTestFormat('make')
PrepareCmake()
retcode += GypTestFormat('cmake')
elif sys.platform == 'darwin':
retcode += GypTestFormat('ninja')
retcode += GypTestFormat('xcode')
retcode += GypTestFormat('make')
elif sys.platform == 'win32':
retcode += GypTestFormat('ninja')
if os.environ['BUILDBOT_BUILDERNAME'] == 'gyp-win64':
retcode += GypTestFormat('msvs-ninja-2013', format='msvs-ninja',
msvs_version='2013',
tests=[
r'test\generator-output\gyptest-actions.py',
r'test\generator-output\gyptest-relocate.py',
r'test\generator-output\gyptest-rules.py'])
retcode += GypTestFormat('msvs-2013', format='msvs', msvs_version='2013')
else:
raise Exception('Unknown platform')
if retcode:
# TODO(bradnelson): once the annotator supports a postscript (section for
# after the build proper that could be used for cumulative failures),
# use that instead of this. This isolates the final return value so
# that it isn't misattributed to the last stage.
print '@@@BUILD_STEP failures@@@'
sys.exit(retcode)
if __name__ == '__main__':
GypBuild()
| gpl-2.0 |
pheinrichs/librenms | LibreNMS/service.py | 14 | 31186 | import LibreNMS
import json
import logging
import os
import subprocess
import threading
import sys
import time
import timeit
from datetime import timedelta
from logging import debug, info, warning, error, critical, exception
from platform import python_version
from time import sleep
from socket import gethostname
from signal import signal, SIGTERM
from uuid import uuid1
class PerformanceCounter(object):
"""
This is a simple counter to record execution time and number of jobs. It's unique to each
    poller instance, so it does not need to be globally synchronised, just locally.
"""
def __init__(self):
self._count = 0
self._jobs = 0
self._lock = threading.Lock()
def add(self, n):
"""
Add n to the counter and increment the number of jobs by 1
:param n: Number to increment by
"""
with self._lock:
self._count += n
self._jobs += 1
def split(self, precise=False):
"""
Return the current counter value and keep going
:param precise: Whether floating point precision is desired
:return: ((INT or FLOAT), INT)
"""
return (self._count if precise else int(self._count)), self._jobs
def reset(self, precise=False):
"""
Return the current counter value and then zero it.
:param precise: Whether floating point precision is desired
:return: ((INT or FLOAT), INT)
"""
with self._lock:
c = self._count
j = self._jobs
self._count = 0
self._jobs = 0
return (c if precise else int(c)), j
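# Usage sketch: each worker calls add(elapsed_seconds) once per job, and the
# stats timer periodically calls reset() to obtain (total_seconds, job_count)
# for the reporting interval just ended.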
class TimeitContext(object):
"""
Wrapper around timeit to allow the timing of larger blocks of code by wrapping them in "with"
"""
def __init__(self):
self._t = timeit.default_timer()
def __enter__(self):
return self
def __exit__(self, *args):
del self._t
def delta(self):
"""
Calculate the elapsed time since the context was initialised
:return: FLOAT
"""
if not self._t:
raise ArithmeticError("Timer has not been started, cannot return delta")
return timeit.default_timer() - self._t
@classmethod
def start(cls):
"""
Factory method for TimeitContext
:param cls:
:return: TimeitContext
"""
return cls()
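# Intended pattern (as used by the pollers below):
#     with TimeitContext.start() as t:
#         ...do the work...
#         elapsed = t.delta()
# Note delta() must be called inside the with block, because __exit__ deletes
# the underlying timer.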
class ServiceConfig:
def __init__(self):
"""
Stores all of the configuration variables for the LibreNMS service in a common object
Starts with defaults, but can be populated with variables from config.php by calling populate()
"""
self._uuid = str(uuid1())
self.set_name(gethostname())
def set_name(self, name):
if name:
self.name = name.strip()
self.unique_name = "{}-{}".format(self.name, self._uuid)
class PollerConfig:
def __init__(self, workers, frequency, calculate=None):
self.workers = workers
self.frequency = frequency
self.calculate = calculate
# config variables with defaults
BASE_DIR = os.path.abspath(os.path.join(os.path.dirname(os.path.realpath(__file__)), os.pardir))
node_id = None
name = None
unique_name = None
single_instance = True
distributed = False
group = 0
debug = False
log_level = 20
alerting = PollerConfig(1, 60)
poller = PollerConfig(24, 300)
services = PollerConfig(8, 300)
discovery = PollerConfig(16, 21600)
billing = PollerConfig(2, 300, 60)
down_retry = 60
update_frequency = 86400
master_resolution = 1
master_timeout = 10
redis_host = 'localhost'
redis_port = 6379
redis_db = 0
redis_pass = None
redis_socket = None
db_host = 'localhost'
db_port = 0
db_socket = None
db_user = 'librenms'
db_pass = ''
db_name = 'librenms'
def populate(self):
config = self._get_config_data()
# populate config variables
self.node_id = os.getenv('NODE_ID')
self.set_name(config.get('distributed_poller_name', None))
self.distributed = config.get('distributed_poller', ServiceConfig.distributed)
self.group = ServiceConfig.parse_group(config.get('distributed_poller_group', ServiceConfig.group))
# backward compatible options
self.poller.workers = config.get('poller_service_workers', ServiceConfig.poller.workers)
self.poller.frequency = config.get('poller_service_poll_frequency', ServiceConfig.poller.frequency)
self.discovery.frequency = config.get('poller_service_discover_frequency', ServiceConfig.discovery.frequency)
self.down_retry = config.get('poller_service_down_retry', ServiceConfig.down_retry)
self.log_level = config.get('poller_service_loglevel', ServiceConfig.log_level)
# new options
self.poller.workers = config.get('service_poller_workers', ServiceConfig.poller.workers)
self.poller.frequency = config.get('service_poller_frequency', ServiceConfig.poller.frequency)
self.services.workers = config.get('service_services_workers', ServiceConfig.services.workers)
self.services.frequency = config.get('service_services_frequency', ServiceConfig.services.frequency)
self.discovery.workers = config.get('service_discovery_workers', ServiceConfig.discovery.workers)
self.discovery.frequency = config.get('service_discovery_frequency', ServiceConfig.discovery.frequency)
self.billing.frequency = config.get('service_billing_frequency', ServiceConfig.billing.frequency)
self.billing.calculate = config.get('service_billing_calculate_frequency', ServiceConfig.billing.calculate)
self.down_retry = config.get('service_poller_down_retry', ServiceConfig.down_retry)
self.log_level = config.get('service_loglevel', ServiceConfig.log_level)
self.update_frequency = config.get('service_update_frequency', ServiceConfig.update_frequency)
self.redis_host = os.getenv('REDIS_HOST', config.get('redis_host', ServiceConfig.redis_host))
self.redis_db = os.getenv('REDIS_DB', config.get('redis_db', ServiceConfig.redis_db))
self.redis_pass = os.getenv('REDIS_PASSWORD', config.get('redis_pass', ServiceConfig.redis_pass))
self.redis_port = int(os.getenv('REDIS_PORT', config.get('redis_port', ServiceConfig.redis_port)))
self.redis_socket = os.getenv('REDIS_SOCKET', config.get('redis_socket', ServiceConfig.redis_socket))
self.db_host = os.getenv('DB_HOST', config.get('db_host', ServiceConfig.db_host))
self.db_name = os.getenv('DB_DATABASE', config.get('db_name', ServiceConfig.db_name))
self.db_pass = os.getenv('DB_PASSWORD', config.get('db_pass', ServiceConfig.db_pass))
self.db_port = int(os.getenv('DB_PORT', config.get('db_port', ServiceConfig.db_port)))
self.db_socket = os.getenv('DB_SOCKET', config.get('db_socket', ServiceConfig.db_socket))
self.db_user = os.getenv('DB_USERNAME', config.get('db_user', ServiceConfig.db_user))
# set convenient debug variable
self.debug = logging.getLogger().isEnabledFor(logging.DEBUG)
if not self.debug and self.log_level:
try:
logging.getLogger().setLevel(self.log_level)
except ValueError:
error("Unknown log level {}, must be one of 'DEBUG', 'INFO', 'WARNING', 'ERROR', 'CRITICAL'".format(self.log_level))
logging.getLogger().setLevel(logging.INFO)
def _get_config_data(self):
try:
import dotenv
env_path = "{}/.env".format(self.BASE_DIR)
info("Attempting to load .env from '%s'", env_path)
dotenv.load_dotenv(dotenv_path=env_path, verbose=True)
if not os.getenv('NODE_ID'):
raise ImportError(".env does not contain a valid NODE_ID setting.")
except ImportError as e:
exception("Could not import .env - check that the poller user can read the file, and that composer install has been run recently")
sys.exit(3)
config_cmd = ['/usr/bin/env', 'php', '{}/config_to_json.php'.format(self.BASE_DIR), '2>&1']
try:
return json.loads(subprocess.check_output(config_cmd).decode())
except subprocess.CalledProcessError as e:
error("ERROR: Could not load or parse configuration! {}: {}"
.format(subprocess.list2cmdline(e.cmd), e.output.decode()))
@staticmethod
def parse_group(g):
if g is None:
return [0]
elif type(g) is int:
return [g]
elif type(g) is str:
try:
return [int(x) for x in set(g.split(','))]
except ValueError:
pass
error("Could not parse group string, defaulting to 0")
return [0]
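# e.g. parse_group("0,2,2") gives [0, 2] (duplicates dropped; ordering is not
# guaranteed because the values pass through a set), and unparseable input
# falls back to [0].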
class Service:
config = ServiceConfig()
_fp = False
_started = False
alerting_manager = None
poller_manager = None
discovery_manager = None
services_manager = None
billing_manager = None
last_poll = {}
terminate_flag = False
def __init__(self):
self.config.populate()
threading.current_thread().name = self.config.name # rename main thread
self.attach_signals()
# init database connections different ones for different threads
self._db = LibreNMS.DB(self.config) # main
self._services_db = LibreNMS.DB(self.config) # services dispatch
self._discovery_db = LibreNMS.DB(self.config) # discovery dispatch
self._lm = self.create_lock_manager()
self.daily_timer = LibreNMS.RecurringTimer(self.config.update_frequency, self.run_maintenance, 'maintenance')
self.stats_timer = LibreNMS.RecurringTimer(self.config.poller.frequency, self.log_performance_stats, 'performance')
self.is_master = False
self.performance_stats = {'poller': PerformanceCounter(), 'discovery': PerformanceCounter(), 'services': PerformanceCounter()}
def attach_signals(self):
info("Attaching signal handlers on thread %s", threading.current_thread().name)
signal(SIGTERM, self.terminate) # capture sigterm and exit gracefully
def start(self):
debug("Performing startup checks...")
if self.config.single_instance:
self.check_single_instance() # don't allow more than one service at a time
if self._started:
raise RuntimeWarning("Not allowed to start Poller twice")
self._started = True
debug("Starting up queue managers...")
# initialize and start the worker pools
self.poller_manager = LibreNMS.QueueManager(self.config, 'poller', self.poll_device)
self.alerting_manager = LibreNMS.TimedQueueManager(self.config, 'alerting', self.poll_alerting,
self.dispatch_alerting)
self.services_manager = LibreNMS.TimedQueueManager(self.config, 'services', self.poll_services,
self.dispatch_services)
self.discovery_manager = LibreNMS.TimedQueueManager(self.config, 'discovery', self.discover_device,
self.dispatch_discovery)
self.billing_manager = LibreNMS.BillingQueueManager(self.config, self.poll_billing,
self.dispatch_poll_billing, self.dispatch_calculate_billing)
self.daily_timer.start()
self.stats_timer.start()
info("LibreNMS Service: {} started!".format(self.config.unique_name))
info("Poller group {}. Using Python {} and {} locks and queues"
.format('0 (default)' if self.config.group == [0] else self.config.group, python_version(),
'redis' if isinstance(self._lm, LibreNMS.RedisLock) else 'internal'))
info("Maintenance tasks will be run every {}".format(timedelta(seconds=self.config.update_frequency)))
# Main dispatcher loop
try:
while not self.terminate_flag:
master_lock = self._lm.lock('dispatch.master', self.config.unique_name, self.config.master_timeout, True)
if master_lock:
if not self.is_master:
info("{} is now the master dispatcher".format(self.config.name))
self.is_master = True
self.start_dispatch_timers()
devices = self.fetch_immediate_device_list()
for device in devices:
device_id = device[0]
group = device[1]
if device[2]: # polling
self.dispatch_immediate_polling(device_id, group)
if device[3]: # discovery
self.dispatch_immediate_discovery(device_id, group)
else:
if self.is_master:
info("{} is no longer the master dispatcher".format(self.config.name))
self.stop_dispatch_timers()
self.is_master = False # no longer master
sleep(self.config.master_resolution)
except KeyboardInterrupt:
pass
info("Dispatch loop terminated")
self.shutdown()
# ------------ Discovery ------------
def dispatch_immediate_discovery(self, device_id, group):
if self.discovery_manager.get_queue(group).empty() and not self.discovery_is_locked(device_id):
self.discovery_manager.post_work(device_id, group)
def dispatch_discovery(self):
devices = self.fetch_device_list()
for device in devices:
self.discovery_manager.post_work(device[0], device[1])
def discover_device(self, device_id):
if self.lock_discovery(device_id):
try:
with TimeitContext.start() as t:
info("Discovering device {}".format(device_id))
self.call_script('discovery.php', ('-h', device_id))
info('Discovery complete {}'.format(device_id))
self.report_execution_time(t.delta(), 'discovery')
except subprocess.CalledProcessError as e:
if e.returncode == 5:
info("Device {} is down, cannot discover, waiting {}s for retry"
.format(device_id, self.config.down_retry))
self.lock_discovery(device_id, True)
else:
self.unlock_discovery(device_id)
else:
self.unlock_discovery(device_id)
# ------------ Alerting ------------
def dispatch_alerting(self):
self.alerting_manager.post_work('alerts', 0)
def poll_alerting(self, _=None):
try:
info("Checking alerts")
self.call_script('alerts.php')
except subprocess.CalledProcessError as e:
if e.returncode == 1:
warning("There was an error issuing alerts: {}".format(e.output))
else:
raise
# ------------ Services ------------
def dispatch_services(self):
devices = self.fetch_services_device_list()
for device in devices:
self.services_manager.post_work(device[0], device[1])
def poll_services(self, device_id):
if self.lock_services(device_id):
try:
with TimeitContext.start() as t:
info("Checking services on device {}".format(device_id))
self.call_script('check-services.php', ('-h', device_id))
info('Services complete {}'.format(device_id))
self.report_execution_time(t.delta(), 'services')
except subprocess.CalledProcessError as e:
if e.returncode == 5:
info("Device {} is down, cannot poll service, waiting {}s for retry"
.format(device_id, self.config.down_retry))
self.lock_services(device_id, True)
else:
self.unlock_services(device_id)
else:
self.unlock_services(device_id)
# ------------ Billing ------------
def dispatch_calculate_billing(self):
self.billing_manager.post_work('calculate', 0)
def dispatch_poll_billing(self):
self.billing_manager.post_work('poll', 0)
def poll_billing(self, run_type):
if run_type == 'poll':
info("Polling billing")
self.call_script('poll-billing.php')
info("Polling billing complete")
else: # run_type == 'calculate'
info("Calculating billing")
self.call_script('billing-calculate.php')
info("Calculating billing complete")
# ------------ Polling ------------
def dispatch_immediate_polling(self, device_id, group):
if self.poller_manager.get_queue(group).empty() and not self.polling_is_locked(device_id):
self.poller_manager.post_work(device_id, group)
if self.config.debug:
cur_time = time.time()
elapsed = cur_time - self.last_poll.get(device_id, cur_time)
self.last_poll[device_id] = time.time()
# arbitrary limit to reduce spam
if elapsed > (self.config.poller.frequency - self.config.master_resolution):
debug("Dispatching polling for device {}, time since last poll {:.2f}s"
.format(device_id, elapsed))
def poll_device(self, device_id):
if self.lock_polling(device_id):
info('Polling device {}'.format(device_id))
try:
with TimeitContext.start() as t:
self.call_script('poller.php', ('-h', device_id))
self.report_execution_time(t.delta(), 'poller')
except subprocess.CalledProcessError as e:
if e.returncode == 6:
warning('Polling device {} unreachable, waiting {}s for retry'.format(device_id, self.config.down_retry))
# re-lock to set retry timer
self.lock_polling(device_id, True)
else:
error('Polling device {} failed! {}'.format(device_id, e))
self.unlock_polling(device_id)
else:
info('Polling complete {}'.format(device_id))
# self.polling_unlock(device_id)
else:
debug('Tried to poll {}, but it is locked'.format(device_id))
def fetch_services_device_list(self):
return self._services_db.query("SELECT DISTINCT(`device_id`), `poller_group` FROM `services`"
" LEFT JOIN `devices` USING (`device_id`) WHERE `disabled`=0")
def fetch_device_list(self):
return self._discovery_db.query("SELECT `device_id`, `poller_group` FROM `devices` WHERE `disabled`=0")
def fetch_immediate_device_list(self):
poller_find_time = self.config.poller.frequency - 1
discovery_find_time = self.config.discovery.frequency - 1
return self._db.query('''SELECT `device_id`,
`poller_group`,
COALESCE(`last_polled` <= DATE_ADD(DATE_ADD(NOW(), INTERVAL -%s SECOND), INTERVAL `last_polled_timetaken` SECOND), 1) AS `poll`,
COALESCE(`last_discovered` <= DATE_ADD(DATE_ADD(NOW(), INTERVAL -%s SECOND), INTERVAL `last_discovered_timetaken` SECOND), 1) AS `discover`
FROM `devices`
WHERE `disabled` = 0 AND (
`last_polled` IS NULL OR
`last_discovered` IS NULL OR
`last_polled` <= DATE_ADD(DATE_ADD(NOW(), INTERVAL -%s SECOND), INTERVAL `last_polled_timetaken` SECOND) OR
`last_discovered` <= DATE_ADD(DATE_ADD(NOW(), INTERVAL -%s SECOND), INTERVAL `last_discovered_timetaken` SECOND)
)
ORDER BY `last_polled_timetaken` DESC''', (poller_find_time, discovery_find_time, poller_find_time, discovery_find_time))
def run_maintenance(self):
"""
Runs update and cleanup tasks by calling daily.sh. Reloads the python script after the update.
Sets a schema-update lock so no distributed pollers will update until the schema has been updated.
"""
attempt = 0
wait = 5
max_runtime = 86100
max_tries = int(max_runtime / wait)
info("Waiting for schema lock")
while not self._lm.lock('schema-update', self.config.unique_name, max_runtime):
attempt += 1
if attempt >= max_tries: # don't get stuck indefinitely
warning('Reached max wait for other pollers to update, updating now')
break
sleep(wait)
info("Running maintenance tasks")
output = self.call_script('daily.sh')
info("Maintenance tasks complete\n{}".format(output))
self.restart()
# Lock Helpers #
def lock_discovery(self, device_id, retry=False):
lock_name = self.gen_lock_name('discovery', device_id)
timeout = self.config.down_retry if retry else LibreNMS.normalize_wait(self.config.discovery.frequency)
return self._lm.lock(lock_name, self.gen_lock_owner(), timeout, retry)
def unlock_discovery(self, device_id):
lock_name = self.gen_lock_name('discovery', device_id)
return self._lm.unlock(lock_name, self.gen_lock_owner())
def discovery_is_locked(self, device_id):
lock_name = self.gen_lock_name('discovery', device_id)
return self._lm.check_lock(lock_name)
def lock_polling(self, device_id, retry=False):
lock_name = self.gen_lock_name('polling', device_id)
timeout = self.config.down_retry if retry else self.config.poller.frequency
return self._lm.lock(lock_name, self.gen_lock_owner(), timeout, retry)
def unlock_polling(self, device_id):
lock_name = self.gen_lock_name('polling', device_id)
return self._lm.unlock(lock_name, self.gen_lock_owner())
def polling_is_locked(self, device_id):
lock_name = self.gen_lock_name('polling', device_id)
return self._lm.check_lock(lock_name)
def lock_services(self, device_id, retry=False):
lock_name = self.gen_lock_name('services', device_id)
timeout = self.config.down_retry if retry else self.config.services.frequency
return self._lm.lock(lock_name, self.gen_lock_owner(), timeout, retry)
def unlock_services(self, device_id):
lock_name = self.gen_lock_name('services', device_id)
return self._lm.unlock(lock_name, self.gen_lock_owner())
def services_is_locked(self, device_id):
lock_name = self.gen_lock_name('services', device_id)
return self._lm.check_lock(lock_name)
@staticmethod
def gen_lock_name(lock_class, device_id):
return '{}.device.{}'.format(lock_class, device_id)
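    # e.g. gen_lock_name('polling', 42) -> 'polling.device.42'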
def gen_lock_owner(self):
return "{}-{}".format(self.config.unique_name, threading.current_thread().name)
def call_script(self, script, args=()):
"""
Run a LibreNMS script. Captures all output and throws an exception if a non-zero
status is returned. Blocks parent signals (like SIGINT and SIGTERM).
:param script: the name of the executable relative to the base directory
:param args: a tuple of arguments to send to the command
        :returns: the output of the command
"""
if script.endswith('.php'):
# save calling the sh process
base = ('/usr/bin/env', 'php')
else:
base = ()
cmd = base + ("{}/{}".format(self.config.BASE_DIR, script),) + tuple(map(str, args))
# preexec_fn=os.setsid here keeps process signals from propagating
return subprocess.check_output(cmd, stderr=subprocess.STDOUT, preexec_fn=os.setsid, close_fds=True).decode()
def create_lock_manager(self):
"""
Create a new LockManager. Tries to create a Redis LockManager, but falls
back to python's internal threading lock implementation.
Exits if distributing poller is enabled and a Redis LockManager cannot be created.
:return: Instance of LockManager
"""
try:
return LibreNMS.RedisLock(namespace='librenms.lock',
host=self.config.redis_host,
port=self.config.redis_port,
db=self.config.redis_db,
password=self.config.redis_pass,
unix_socket_path=self.config.redis_socket)
except ImportError:
if self.config.distributed:
critical("ERROR: Redis connection required for distributed polling")
critical("Please install redis-py, either through your os software repository or from PyPI")
sys.exit(2)
except Exception as e:
if self.config.distributed:
critical("ERROR: Redis connection required for distributed polling")
critical("Could not connect to Redis. {}".format(e))
sys.exit(2)
return LibreNMS.ThreadingLock()
def restart(self):
"""
Stop then recreate this entire process by re-calling the original script.
Has the effect of reloading the python files from disk.
"""
if sys.version_info < (3, 4, 0):
warning("Skipping restart as running under an incompatible interpreter")
warning("Please restart manually")
return
info('Restarting service... ')
self._stop_managers_and_wait()
self._lm.unlock('dispatch.master', self.config.unique_name)
python = sys.executable
os.execl(python, python, *sys.argv)
def terminate(self, _unused=None, _=None):
"""
        Handle a termination signal: set the terminate flag to begin a clean shutdown
:param _unused:
:param _:
"""
info("Received SIGTERM on thead %s, handling", threading.current_thread().name)
self.terminate_flag = True
def shutdown(self, _unused=None, _=None):
"""
Stop and exit, waiting for all child processes to exit.
:param _unused:
:param _:
"""
info('Shutting down, waiting for running jobs to complete...')
self.stop_dispatch_timers()
self._lm.unlock('dispatch.master', self.config.unique_name)
self.daily_timer.stop()
self.stats_timer.stop()
self._stop_managers_and_wait()
# try to release master lock
info('Shutdown of %s/%s complete', os.getpid(), threading.current_thread().name)
sys.exit(0)
def start_dispatch_timers(self):
"""
Start all dispatch timers and begin pushing events into queues.
This should only be started when we are the master dispatcher.
"""
self.alerting_manager.start_dispatch()
self.billing_manager.start_dispatch()
self.services_manager.start_dispatch()
self.discovery_manager.start_dispatch()
def stop_dispatch_timers(self):
"""
Stop all dispatch timers, this should be called when we are no longer the master dispatcher.
"""
self.alerting_manager.stop_dispatch()
self.billing_manager.stop_dispatch()
self.services_manager.stop_dispatch()
self.discovery_manager.stop_dispatch()
def _stop_managers_and_wait(self):
"""
Stop all QueueManagers, and wait for their processing threads to complete.
We send the stop signal to all QueueManagers first, then wait for them to finish.
"""
self.discovery_manager.stop()
self.poller_manager.stop()
self.services_manager.stop()
self.billing_manager.stop()
self.discovery_manager.stop_and_wait()
self.poller_manager.stop_and_wait()
self.services_manager.stop_and_wait()
self.billing_manager.stop_and_wait()
def check_single_instance(self):
"""
Check that there is only one instance of the service running on this computer.
        We do this by creating a file in the base directory (.lock.service) if it doesn't exist and
obtaining an exclusive lock on that file.
"""
lock_file = "{}/{}".format(self.config.BASE_DIR, '.lock.service')
import fcntl
self._fp = open(lock_file, 'w') # keep a reference so the file handle isn't garbage collected
self._fp.flush()
try:
fcntl.lockf(self._fp, fcntl.LOCK_EX | fcntl.LOCK_NB)
except IOError:
warning("Another instance is already running, quitting.")
exit(2)
def report_execution_time(self, time, activity):
self.performance_stats[activity].add(time)
def log_performance_stats(self):
info("Counting up time spent polling")
try:
# Report on the poller instance as a whole
self._db.query('INSERT INTO poller_cluster(node_id, poller_name, poller_version, poller_groups, last_report, master) '
'values("{0}", "{1}", "{2}", "{3}", NOW(), {4}) '
'ON DUPLICATE KEY UPDATE poller_version="{2}", poller_groups="{3}", last_report=NOW(), master={4}; '
.format(self.config.node_id, self.config.name, "librenms-service", ','.join(str(g) for g in self.config.group), 1 if self.is_master else 0))
# Find our ID
self._db.query('SELECT id INTO @parent_poller_id FROM poller_cluster WHERE node_id="{0}"; '.format(self.config.node_id))
for worker_type, counter in self.performance_stats.items():
worker_seconds, devices = counter.reset()
# Record the queue state
self._db.query('INSERT INTO poller_cluster_stats(parent_poller, poller_type, depth, devices, worker_seconds, workers, frequency) '
'values(@parent_poller_id, "{0}", {1}, {2}, {3}, {4}, {5}) '
'ON DUPLICATE KEY UPDATE depth={1}, devices={2}, worker_seconds={3}, workers={4}, frequency={5}; '
.format(worker_type,
sum([getattr(self, ''.join([worker_type, '_manager'])).get_queue(group).qsize() for group in self.config.group]),
devices,
worker_seconds,
getattr(self.config, worker_type).workers,
getattr(self.config, worker_type).frequency)
)
except Exception:
exception("Unable to log performance statistics - is the database still online?")
| gpl-3.0 |
m11s/MissionPlanner | Lib/ssl.py | 50 | 16075 | # Wrapper module for _ssl, providing some additional facilities
# implemented in Python. Written by Bill Janssen.
"""\
This module provides some more Pythonic support for SSL.
Object types:
SSLSocket -- subtype of socket.socket which does SSL over the socket
Exceptions:
SSLError -- exception raised for I/O errors
Functions:
cert_time_to_seconds -- convert time string used for certificate
notBefore and notAfter functions to integer
seconds past the Epoch (the time values
returned from time.time())
fetch_server_certificate (HOST, PORT) -- fetch the certificate provided
by the server running on HOST at port PORT. No
validation of the certificate is performed.
Integer constants:
SSL_ERROR_ZERO_RETURN
SSL_ERROR_WANT_READ
SSL_ERROR_WANT_WRITE
SSL_ERROR_WANT_X509_LOOKUP
SSL_ERROR_SYSCALL
SSL_ERROR_SSL
SSL_ERROR_WANT_CONNECT
SSL_ERROR_EOF
SSL_ERROR_INVALID_ERROR_CODE
The following group define certificate requirements that one side is
allowing/requiring from the other side:
CERT_NONE - no certificates from the other side are required (or will
be looked at if provided)
CERT_OPTIONAL - certificates are not required, but if provided will be
validated, and if validation fails, the connection will
also fail
CERT_REQUIRED - certificates are required, and will be validated, and
if validation fails, the connection will also fail
The following constants identify various SSL protocol variants:
PROTOCOL_SSLv2
PROTOCOL_SSLv3
PROTOCOL_SSLv23
PROTOCOL_TLSv1
"""
import textwrap
import _ssl # if we can't import it, let the error propagate
from _ssl import OPENSSL_VERSION_NUMBER, OPENSSL_VERSION_INFO, OPENSSL_VERSION
from _ssl import SSLError
from _ssl import CERT_NONE, CERT_OPTIONAL, CERT_REQUIRED
from _ssl import RAND_status, RAND_egd, RAND_add
from _ssl import \
SSL_ERROR_ZERO_RETURN, \
SSL_ERROR_WANT_READ, \
SSL_ERROR_WANT_WRITE, \
SSL_ERROR_WANT_X509_LOOKUP, \
SSL_ERROR_SYSCALL, \
SSL_ERROR_SSL, \
SSL_ERROR_WANT_CONNECT, \
SSL_ERROR_EOF, \
SSL_ERROR_INVALID_ERROR_CODE
from _ssl import PROTOCOL_SSLv3, PROTOCOL_SSLv23, PROTOCOL_TLSv1
_PROTOCOL_NAMES = {
PROTOCOL_TLSv1: "TLSv1",
PROTOCOL_SSLv23: "SSLv23",
PROTOCOL_SSLv3: "SSLv3",
}
try:
from _ssl import PROTOCOL_SSLv2
except ImportError:
pass
else:
_PROTOCOL_NAMES[PROTOCOL_SSLv2] = "SSLv2"
from socket import socket, _fileobject, _delegate_methods, error as socket_error
from socket import getnameinfo as _getnameinfo
import base64 # for DER-to-PEM translation
import errno
class SSLSocket(socket):
"""This class implements a subtype of socket.socket that wraps
the underlying OS socket in an SSL context when necessary, and
provides read and write methods over that channel."""
def __init__(self, sock, keyfile=None, certfile=None,
server_side=False, cert_reqs=CERT_NONE,
ssl_version=PROTOCOL_SSLv23, ca_certs=None,
do_handshake_on_connect=True,
suppress_ragged_eofs=True, ciphers=None):
socket.__init__(self, _sock=sock._sock)
# The initializer for socket overrides the methods send(), recv(), etc.
        # in the instance, which we don't need -- but we want to provide the
# methods defined in SSLSocket.
for attr in _delegate_methods:
try:
delattr(self, attr)
except AttributeError:
pass
if certfile and not keyfile:
keyfile = certfile
# see if it's connected
try:
socket.getpeername(self)
except socket_error, e:
if e.errno != errno.ENOTCONN:
raise
# no, no connection yet
self._connected = False
self._sslobj = None
else:
# yes, create the SSL object
self._connected = True
self._sslobj = _ssl.sslwrap(self._sock, server_side,
keyfile, certfile,
cert_reqs, ssl_version, ca_certs,
ciphers)
if do_handshake_on_connect:
self.do_handshake()
self.keyfile = keyfile
self.certfile = certfile
self.cert_reqs = cert_reqs
self.ssl_version = ssl_version
self.ca_certs = ca_certs
self.ciphers = ciphers
self.do_handshake_on_connect = do_handshake_on_connect
self.suppress_ragged_eofs = suppress_ragged_eofs
self._makefile_refs = 0
def read(self, len=1024):
"""Read up to LEN bytes and return them.
Return zero-length string on EOF."""
try:
return self._sslobj.read(len)
except SSLError, x:
if x.args[0] == SSL_ERROR_EOF and self.suppress_ragged_eofs:
return ''
else:
raise
def write(self, data):
"""Write DATA to the underlying SSL channel. Returns
number of bytes of DATA actually transmitted."""
return self._sslobj.write(data)
def getpeercert(self, binary_form=False):
"""Returns a formatted version of the data in the
certificate provided by the other end of the SSL channel.
Return None if no certificate was provided, {} if a
certificate was provided, but not validated."""
return self._sslobj.peer_certificate(binary_form)
def cipher(self):
if not self._sslobj:
return None
else:
return self._sslobj.cipher()
def send(self, data, flags=0):
if self._sslobj:
if flags != 0:
raise ValueError(
"non-zero flags not allowed in calls to send() on %s" %
self.__class__)
while True:
try:
v = self._sslobj.write(data)
except SSLError, x:
if x.args[0] == SSL_ERROR_WANT_READ:
return 0
elif x.args[0] == SSL_ERROR_WANT_WRITE:
return 0
else:
raise
else:
return v
else:
return self._sock.send(data, flags)
def sendto(self, data, flags_or_addr, addr=None):
if self._sslobj:
raise ValueError("sendto not allowed on instances of %s" %
self.__class__)
elif addr is None:
return self._sock.sendto(data, flags_or_addr)
else:
return self._sock.sendto(data, flags_or_addr, addr)
def sendall(self, data, flags=0):
if self._sslobj:
if flags != 0:
raise ValueError(
"non-zero flags not allowed in calls to sendall() on %s" %
self.__class__)
amount = len(data)
count = 0
while (count < amount):
v = self.send(data[count:])
count += v
return amount
else:
return socket.sendall(self, data, flags)
def recv(self, buflen=1024, flags=0):
if self._sslobj:
if flags != 0:
raise ValueError(
"non-zero flags not allowed in calls to recv() on %s" %
self.__class__)
return self.read(buflen)
else:
return self._sock.recv(buflen, flags)
def recv_into(self, buffer, nbytes=None, flags=0):
if buffer and (nbytes is None):
nbytes = len(buffer)
elif nbytes is None:
nbytes = 1024
if self._sslobj:
if flags != 0:
raise ValueError(
"non-zero flags not allowed in calls to recv_into() on %s" %
self.__class__)
tmp_buffer = self.read(nbytes)
v = len(tmp_buffer)
buffer[:v] = tmp_buffer
return v
else:
return self._sock.recv_into(buffer, nbytes, flags)
def recvfrom(self, buflen=1024, flags=0):
if self._sslobj:
raise ValueError("recvfrom not allowed on instances of %s" %
self.__class__)
else:
return self._sock.recvfrom(buflen, flags)
def recvfrom_into(self, buffer, nbytes=None, flags=0):
if self._sslobj:
raise ValueError("recvfrom_into not allowed on instances of %s" %
self.__class__)
else:
return self._sock.recvfrom_into(buffer, nbytes, flags)
def pending(self):
if self._sslobj:
return self._sslobj.pending()
else:
return 0
def unwrap(self):
if self._sslobj:
s = self._sslobj.shutdown()
self._sslobj = None
return s
else:
raise ValueError("No SSL wrapper around " + str(self))
def shutdown(self, how):
self._sslobj = None
socket.shutdown(self, how)
def close(self):
if self._makefile_refs < 1:
self._sslobj = None
socket.close(self)
else:
self._makefile_refs -= 1
def do_handshake(self):
"""Perform a TLS/SSL handshake."""
self._sslobj.do_handshake()
def _real_connect(self, addr, return_errno):
# Here we assume that the socket is client-side, and not
# connected at the time of the call. We connect it, then wrap it.
if self._connected:
raise ValueError("attempt to connect already-connected SSLSocket!")
self._sslobj = _ssl.sslwrap(self._sock, False, self.keyfile, self.certfile,
self.cert_reqs, self.ssl_version,
self.ca_certs, self.ciphers)
try:
socket.connect(self, addr)
if self.do_handshake_on_connect:
self.do_handshake()
except socket_error as e:
if return_errno:
return e.errno
else:
self._sslobj = None
raise e
self._connected = True
return 0
def connect(self, addr):
"""Connects to remote ADDR, and then wraps the connection in
an SSL channel."""
self._real_connect(addr, False)
def connect_ex(self, addr):
"""Connects to remote ADDR, and then wraps the connection in
an SSL channel."""
return self._real_connect(addr, True)
def accept(self):
"""Accepts a new connection from a remote client, and returns
a tuple containing that new connection wrapped with a server-side
SSL channel, and the address of the remote client."""
newsock, addr = socket.accept(self)
return (SSLSocket(newsock,
keyfile=self.keyfile,
certfile=self.certfile,
server_side=True,
cert_reqs=self.cert_reqs,
ssl_version=self.ssl_version,
ca_certs=self.ca_certs,
ciphers=self.ciphers,
do_handshake_on_connect=self.do_handshake_on_connect,
suppress_ragged_eofs=self.suppress_ragged_eofs),
addr)
def makefile(self, mode='r', bufsize=-1):
"""Make and return a file-like object that
works with the SSL connection. Just use the code
from the socket module."""
self._makefile_refs += 1
# close=True so as to decrement the reference count when done with
# the file-like object.
return _fileobject(self, mode, bufsize, close=True)
def wrap_socket(sock, keyfile=None, certfile=None,
server_side=False, cert_reqs=CERT_NONE,
ssl_version=PROTOCOL_SSLv23, ca_certs=None,
do_handshake_on_connect=True,
suppress_ragged_eofs=True, ciphers=None):
return SSLSocket(sock, keyfile=keyfile, certfile=certfile,
server_side=server_side, cert_reqs=cert_reqs,
ssl_version=ssl_version, ca_certs=ca_certs,
do_handshake_on_connect=do_handshake_on_connect,
suppress_ragged_eofs=suppress_ragged_eofs,
ciphers=ciphers)
# some utility functions
def cert_time_to_seconds(cert_time):
"""Takes a date-time string in standard ASN1_print form
("MON DAY 24HOUR:MINUTE:SEC YEAR TIMEZONE") and return
a Python time value in seconds past the epoch."""
import time
return time.mktime(time.strptime(cert_time, "%b %d %H:%M:%S %Y GMT"))
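# For example, cert_time_to_seconds("May  9 00:00:00 2007 GMT") returns the
# epoch seconds for that notBefore/notAfter timestamp (time.mktime interprets
# the parsed value in local time).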
PEM_HEADER = "-----BEGIN CERTIFICATE-----"
PEM_FOOTER = "-----END CERTIFICATE-----"
def DER_cert_to_PEM_cert(der_cert_bytes):
"""Takes a certificate in binary DER format and returns the
PEM version of it as a string."""
if hasattr(base64, 'standard_b64encode'):
# preferred because older API gets line-length wrong
f = base64.standard_b64encode(der_cert_bytes)
return (PEM_HEADER + '\n' +
textwrap.fill(f, 64) + '\n' +
PEM_FOOTER + '\n')
else:
return (PEM_HEADER + '\n' +
base64.encodestring(der_cert_bytes) +
PEM_FOOTER + '\n')
def PEM_cert_to_DER_cert(pem_cert_string):
"""Takes a certificate in ASCII PEM format and returns the
DER-encoded version of it as a byte sequence"""
if not pem_cert_string.startswith(PEM_HEADER):
raise ValueError("Invalid PEM encoding; must start with %s"
% PEM_HEADER)
if not pem_cert_string.strip().endswith(PEM_FOOTER):
raise ValueError("Invalid PEM encoding; must end with %s"
% PEM_FOOTER)
d = pem_cert_string.strip()[len(PEM_HEADER):-len(PEM_FOOTER)]
return base64.decodestring(d)
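# The two helpers are inverses: PEM_cert_to_DER_cert(DER_cert_to_PEM_cert(der))
# round-trips back to the original DER bytes.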
def get_server_certificate(addr, ssl_version=PROTOCOL_SSLv3, ca_certs=None):
"""Retrieve the certificate from the server at the specified address,
and return it as a PEM-encoded string.
If 'ca_certs' is specified, validate the server cert against it.
If 'ssl_version' is specified, use it in the connection attempt."""
host, port = addr
if (ca_certs is not None):
cert_reqs = CERT_REQUIRED
else:
cert_reqs = CERT_NONE
s = wrap_socket(socket(), ssl_version=ssl_version,
cert_reqs=cert_reqs, ca_certs=ca_certs)
s.connect(addr)
dercert = s.getpeercert(True)
s.close()
return DER_cert_to_PEM_cert(dercert)
def get_protocol_name(protocol_code):
return _PROTOCOL_NAMES.get(protocol_code, '<unknown>')
# a replacement for the old socket.ssl function
def sslwrap_simple(sock, keyfile=None, certfile=None):
"""A replacement for the old socket.ssl function. Designed
for compability with Python 2.5 and earlier. Will disappear in
Python 3.0."""
if hasattr(sock, "_sock"):
sock = sock._sock
ssl_sock = _ssl.sslwrap(sock, 0, keyfile, certfile, CERT_NONE,
PROTOCOL_SSLv23, None)
try:
sock.getpeername()
except socket_error:
# no, no connection yet
pass
else:
# yes, do the handshake
ssl_sock.do_handshake()
return ssl_sock
| gpl-3.0 |
JulyKikuAkita/PythonPrac | cs15211/24Game.py | 1 | 4885 | __source__ = 'https://leetcode.com/problems/24-game/description/'
# Time: O()
# Space: O()
#
# Description: Leetcode # 679. 24 Game
#
# You have 4 cards each containing a number from 1 to 9.
# You need to judge whether they could be operated through *, /, +, -, (, ) to get the value of 24.
#
# Example 1:
# Input: [4, 1, 8, 7]
# Output: True
# Explanation: (8-4) * (7-1) = 24
# Example 2:
# Input: [1, 2, 1, 2]
# Output: False
# Note:
# The division operator / represents real division, not integer division. For example, 4 / (1 - 2/3) = 12.
# Every operation done is between two numbers. In particular, we cannot use - as a unary operator.
#
# For example, with [1, 1, 1, 1] as input, the expression -1 - 1 - 1 - 1 is not allowed.
# You cannot concatenate numbers together.
# For example, if the input is [1, 2, 1, 2], we cannot write this as 12 + 12.
#
# Companies
# Google
# Related Topics
# Depth-first Search
#
#868ms 6.09%
import unittest
import itertools
from operator import truediv, mul, add, sub
from fractions import Fraction
class Solution(object):
def judgePoint24(self, nums):
"""
:type nums: List[int]
:rtype: bool
"""
def apply(A, B):
ans = set()
for x, y, op in itertools.product(A, B, (truediv, mul, add, sub)):
if op is not truediv or y: ans.add(op(x, y))
if op is not truediv or x: ans.add(op(y, x))
return ans
A = [{x} for x in map(Fraction, nums)]
for i, j in itertools.combinations(range(4), 2):
r1 = apply(A[i], A[j])
k, l = {0, 1, 2, 3} - {i, j}
if 24 in apply(apply(r1, A[k]), A[l]): return True
if 24 in apply(apply(r1, A[l]), A[k]): return True
if 24 in apply(r1, apply(A[k], A[l])): return True
return False
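# Using Fraction keeps division exact, so an expression like 4 / (1 - 2/3)
# evaluates to exactly 12 rather than accumulating float error, and the final
# comparison with 24 needs no epsilon.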
class TestMethods(unittest.TestCase):
def test_Local(self):
self.assertEqual(1, 1)
if __name__ == '__main__':
unittest.main()
Java = '''
#Thought: https://leetcode.com/problems/24-game/solution/
Given: (a, b, c, d) - (A tuple of 4)
Generate:
((a+b),c,d) ((a-b),c,d) ((b-a),c,d) ((a*b),c,d) ((a/b),c,d) ((b/a),c,d)
((a+c),b,d) ................................................................. ((c/a),b,d)
((a+d),b,c) ................................................................. ((d/a),b,c)
(a,(b+c),d) ................................................................. (a,(c/b),d)
(a,(b+d),d) ................................................................. (a,(d/b),d)
(a,b,(c+d)) ................................................................. (a,b,(d/c))
There are 36 (6*6) such tuples. Of these, + & - are not order dependent. That is 2+3 = 3+2.
But / & - are order dependent. i.e. 2/3 != 3/2. These look like (e,f,g) i.e. a tuple of 3 now.
Carrying out similar reductions gives 18 (6*3) tuples for each of the above-generated tuples.
These now look like (h, i) i.e. a tuple of 2 now.
Similiar, the final reduction now yields 6 answers (a+b, a-b, a*b, a/b, b-a, b/a)
for each of the above-generated tuple.
Thus in total 36x18x6 final values can be generated using the 4 operators and 4 initial values.
Algo: Generate all such answers using dfs method and stop when it's 24.
Catches:
Use double instead of int
Be careful about the classical divide by zero error
#18ms 56.03%
class Solution {
public boolean judgePoint24(int[] nums) {
ArrayList A = new ArrayList<Double>();
for (int v: nums) A.add((double) v);
return solve(A);
}
private boolean solve(ArrayList<Double> nums) {
if (nums.size() == 0) return false;
if (nums.size() == 1) return Math.abs(nums.get(0) - 24) < 1e-6;
for (int i = 0; i < nums.size(); i++) {
for (int j = 0; j < nums.size(); j++) {
if (i != j) {
ArrayList<Double> nums2 = new ArrayList<Double>();
for (int k = 0; k < nums.size(); k++) if (k != i && k != j) {
nums2.add(nums.get(k));
}
for (int k = 0; k < 4; k++) {
if (k < 2 && j > i) continue;
if (k == 0) nums2.add(nums.get(i) + nums.get(j));
if (k == 1) nums2.add(nums.get(i) * nums.get(j));
if (k == 2) nums2.add(nums.get(i) - nums.get(j));
if (k == 3) {
if (nums.get(j) != 0) {
nums2.add(nums.get(i) / nums.get(j));
} else {
continue;
}
}
if (solve(nums2)) return true;
nums2.remove(nums2.size() - 1);
}
}
}
}
return false;
}
}
''' | apache-2.0 |
TeamTwisted/external_chromium_org | chrome/common/extensions/docs/server2/gitiles_file_system_test.py | 41 | 5259 | #!/usr/bin/env python
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import base64
import json
import unittest
from extensions_paths import SERVER2
from file_system import StatInfo
from future import Future
from gitiles_file_system import (_CreateStatInfo,
_ParseGitilesJson,
GitilesFileSystem)
from path_util import IsDirectory
from test_file_system import TestFileSystem
from test_util import ReadFile
_BASE_URL = ''
_REAL_DATA_DIR = 'chrome/common/extensions/docs/templates/public/extensions/'
_TEST_DATA = (SERVER2, 'test_data', 'gitiles_file_system', 'public_extensions')
# GitilesFileSystem expects file content to be encoded in base64.
_TEST_FS = {
'test1.txt': base64.b64encode('test1'),
'dir1': {
'test2.txt': base64.b64encode('test2'),
'dir2': {
'test3.txt': base64.b64encode('test3')
}
}
}
class _Response(object):
def __init__(self, content=''):
self.content = content
self.status_code = 200
class DownloadError(Exception):
pass
class _FakeGitilesFetcher(object):
def __init__(self, fs):
self._fs = fs
def FetchAsync(self, url, access_token=None):
def resolve():
assert '?' in url
if url == _BASE_URL + '?format=JSON':
return _Response(json.dumps({'commit': 'a_commit'}))
path, fmt = url.split('?')
# Fetch urls are of the form <base_url>/<path>. We only want <path>.
path = path.split('/', 1)[1]
if path == _REAL_DATA_DIR:
return _Response(ReadFile(*_TEST_DATA))
# ALWAYS skip not found here.
content = self._fs.Read((path,),
skip_not_found=True).Get().get(path, None)
if content is None:
# GitilesFS expects a DownloadError if the file wasn't found.
raise DownloadError
# GitilesFS expects directory content as a JSON string.
if 'JSON' in fmt:
content = json.dumps({
'entries': [{
# GitilesFS expects directory names to not have a trailing '/'.
'name': name.rstrip('/'),
'type': 'tree' if IsDirectory(name) else 'blob'
} for name in content]
})
return _Response(content)
return Future(callback=resolve)
class GitilesFileSystemTest(unittest.TestCase):
def setUp(self):
fetcher = _FakeGitilesFetcher(TestFileSystem(_TEST_FS))
self._gitiles_fs = GitilesFileSystem(fetcher, _BASE_URL, 'master', None)
def testParseGitilesJson(self):
test_json = '\n'.join([
')]}\'',
json.dumps({'commit': 'blah'})
])
self.assertEqual(_ParseGitilesJson(test_json), {'commit': 'blah'})
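  # Gitiles prefixes its JSON responses with ")]}'" as an anti-XSSI guard,
  # which is why _ParseGitilesJson must strip that first line before parsing.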
def testCreateStatInfo(self):
test_json = '\n'.join([
')]}\'',
json.dumps({
'id': 'some_long_string',
'entries': [
{
'mode': 33188,
'type': 'blob',
'id': 'long_id',
'name': '.gitignore'
},
{
'mode': 33188,
'type': 'blob',
'id': 'another_long_id',
'name': 'PRESUBMIT.py'
},
{
'mode': 33188,
'type': 'blob',
'id': 'yali',
'name': 'README'
}
]
})
])
expected_stat_info = StatInfo('some_long_string', {
'.gitignore': 'long_id',
'PRESUBMIT.py': 'another_long_id',
'README': 'yali'
})
self.assertEqual(_CreateStatInfo(test_json), expected_stat_info)
def testRead(self):
# Read a top-level file.
f = self._gitiles_fs.Read(['test1.txt'])
self.assertEqual(f.Get(), {'test1.txt': 'test1'})
# Read a top-level directory.
f = self._gitiles_fs.Read(['dir1/'])
self.assertEqual(f.Get(), {'dir1/': sorted(['test2.txt', 'dir2/'])})
# Read a nested file.
f = self._gitiles_fs.Read(['dir1/test2.txt'])
self.assertEqual(f.Get(), {'dir1/test2.txt': 'test2'})
# Read a nested directory.
f = self._gitiles_fs.Read(['dir1/dir2/'])
self.assertEqual(f.Get(), {'dir1/dir2/': ['test3.txt']})
# Read multiple paths.
f = self._gitiles_fs.Read(['test1.txt', 'dir1/test2.txt'])
self.assertEqual(f.Get(), {'test1.txt': 'test1', 'dir1/test2.txt': 'test2'})
# Test skip not found.
f = self._gitiles_fs.Read(['fakefile'], skip_not_found=True)
self.assertEqual(f.Get(), {})
def testGetCommitID(self):
self.assertEqual(self._gitiles_fs.GetCommitID().Get(), 'a_commit')
def testStat(self):
self.assertEqual(self._gitiles_fs.Stat(_REAL_DATA_DIR).version,
'ec21e736a3f00db2c0580e3cf71d91951656caec')
def testGetIdentity(self):
# Test that file systems at different commits still have the same identity.
other_gitiles_fs = GitilesFileSystem.Create(commit='abcdefghijklmnop')
self.assertEqual(self._gitiles_fs.GetIdentity(),
other_gitiles_fs.GetIdentity())
yet_another_gitiles_fs = GitilesFileSystem.Create(branch='different')
self.assertNotEqual(self._gitiles_fs.GetIdentity(),
yet_another_gitiles_fs.GetIdentity())
if __name__ == '__main__':
unittest.main()
| bsd-3-clause |
vodik/pyroute2 | pyroute2/netlink/taskstats/__init__.py | 7 | 5936 | '''
TaskStats module
================
All that you should know about TaskStats, is that you should not
use it. But if you have to, ok::
import os
from pyroute2 import TaskStats
ts = TaskStats()
ts.get_pid_stat(os.getpid())
It is not fully implemented yet, but some methods are already
usable.
'''
from pyroute2.netlink import NLM_F_REQUEST
from pyroute2.netlink import nla
from pyroute2.netlink import genlmsg
from pyroute2.netlink.generic import GenericNetlinkSocket
TASKSTATS_CMD_UNSPEC = 0 # Reserved
TASKSTATS_CMD_GET = 1 # user->kernel request/get-response
TASKSTATS_CMD_NEW = 2
class tcmd(genlmsg):
nla_map = (('TASKSTATS_CMD_ATTR_UNSPEC', 'none'),
('TASKSTATS_CMD_ATTR_PID', 'uint32'),
('TASKSTATS_CMD_ATTR_TGID', 'uint32'),
('TASKSTATS_CMD_ATTR_REGISTER_CPUMASK', 'asciiz'),
('TASKSTATS_CMD_ATTR_DEREGISTER_CPUMASK', 'asciiz'))
class tstats(nla):
pack = "struct"
fields = (('version', 'H'), # 2
('ac_exitcode', 'I'), # 4
('ac_flag', 'B'), # 1
('ac_nice', 'B'), # 1 --- 10
('cpu_count', 'Q'), # 8
('cpu_delay_total', 'Q'), # 8
('blkio_count', 'Q'), # 8
('blkio_delay_total', 'Q'), # 8
('swapin_count', 'Q'), # 8
('swapin_delay_total', 'Q'), # 8
('cpu_run_real_total', 'Q'), # 8
('cpu_run_virtual_total', 'Q'), # 8
('ac_comm', '32s'), # 32 +++ 112
('ac_sched', 'B'), # 1
('__pad', '3x'), # 1 --- 8 (!)
('ac_uid', 'I'), # 4 +++ 120
('ac_gid', 'I'), # 4
('ac_pid', 'I'), # 4
('ac_ppid', 'I'), # 4
('ac_btime', 'I'), # 4 +++ 136
('ac_etime', 'Q'), # 8 +++ 144
('ac_utime', 'Q'), # 8
('ac_stime', 'Q'), # 8
('ac_minflt', 'Q'), # 8
('ac_majflt', 'Q'), # 8
('coremem', 'Q'), # 8
('virtmem', 'Q'), # 8
('hiwater_rss', 'Q'), # 8
('hiwater_vm', 'Q'), # 8
('read_char', 'Q'), # 8
('write_char', 'Q'), # 8
('read_syscalls', 'Q'), # 8
('write_syscalls', 'Q'), # 8
('read_bytes', 'Q'), # ...
('write_bytes', 'Q'),
('cancelled_write_bytes', 'Q'),
('nvcsw', 'Q'),
('nivcsw', 'Q'),
('ac_utimescaled', 'Q'),
('ac_stimescaled', 'Q'),
('cpu_scaled_run_real_total', 'Q'))
def decode(self):
nla.decode(self)
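        # ac_comm is a fixed 32-byte field padded with NULs; trim at the
        # first NUL to recover the plain command name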
self['ac_comm'] = self['ac_comm'][:self['ac_comm'].find('\0')]
class taskstatsmsg(genlmsg):
nla_map = (('TASKSTATS_TYPE_UNSPEC', 'none'),
('TASKSTATS_TYPE_PID', 'uint32'),
('TASKSTATS_TYPE_TGID', 'uint32'),
('TASKSTATS_TYPE_STATS', 'stats'),
('TASKSTATS_TYPE_AGGR_PID', 'aggr_pid'),
('TASKSTATS_TYPE_AGGR_TGID', 'aggr_tgid'))
class stats(tstats):
pass # FIXME: optimize me!
class aggr_id(nla):
nla_map = (('TASKSTATS_TYPE_UNSPEC', 'none'),
('TASKSTATS_TYPE_PID', 'uint32'),
('TASKSTATS_TYPE_TGID', 'uint32'),
('TASKSTATS_TYPE_STATS', 'stats'))
class stats(tstats):
pass
class aggr_pid(aggr_id):
pass
class aggr_tgid(aggr_id):
pass
class TaskStats(GenericNetlinkSocket):
def __init__(self):
GenericNetlinkSocket.__init__(self)
def bind(self):
GenericNetlinkSocket.bind(self, 'TASKSTATS', taskstatsmsg)
def get_pid_stat(self, pid):
'''
Get taskstats for a process. Pid should be an integer.
'''
msg = tcmd()
msg['cmd'] = TASKSTATS_CMD_GET
msg['version'] = 1
msg['attrs'].append(['TASKSTATS_CMD_ATTR_PID', pid])
return self.nlm_request(msg,
self.prid,
msg_flags=NLM_F_REQUEST)
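    # A rough usage sketch (assumes a Linux kernel with CONFIG_TASKSTATS and
    # sufficient privileges; attribute names follow the nla_map above):
    #
    #   ts = TaskStats()
    #   ts.bind()
    #   msg = ts.get_pid_stat(os.getpid())[0]
    #   stats = (msg.get_attr('TASKSTATS_TYPE_AGGR_PID')
    #               .get_attr('TASKSTATS_TYPE_STATS'))
    #   print(stats['ac_comm'], stats['cpu_run_real_total'])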
def _register_mask(self, cmd, mask):
msg = tcmd()
msg['cmd'] = TASKSTATS_CMD_GET
msg['version'] = 1
msg['attrs'].append([cmd, mask])
# there is no response to this request
self.put(msg,
self.prid,
msg_flags=NLM_F_REQUEST)
def register_mask(self, mask):
'''
        Start accounting for the processors specified by a mask. The mask is
a string, e.g.::
0,1 -- first two CPUs
0-4,6-10 -- CPUs from 0 to 4 and from 6 to 10
        Though the kernel has a procedure that cleans up accounting
        when it is no longer used, it is recommended to run deregister_mask()
        before the process exits.
'''
self._register_mask('TASKSTATS_CMD_ATTR_REGISTER_CPUMASK',
mask)
def deregister_mask(self, mask):
'''
Stop the accounting.
'''
self._register_mask('TASKSTATS_CMD_ATTR_DEREGISTER_CPUMASK',
mask)
| apache-2.0 |
glovebx/odoo | addons/point_of_sale/report/pos_report.py | 381 | 6370 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields, osv
from openerp import tools
class report_transaction_pos(osv.osv):
_name = "report.transaction.pos"
_description = "transaction for the pos"
_auto = False
_columns = {
'date_create': fields.char('Date', size=16, readonly=True),
'journal_id': fields.many2one('account.journal', 'Sales Journal', readonly=True),
'jl_id': fields.many2one('account.journal', 'Cash Journals', readonly=True),
'user_id': fields.many2one('res.users', 'User', readonly=True),
'no_trans': fields.float('Number of Transaction', readonly=True),
'amount': fields.float('Amount', readonly=True),
'invoice_id': fields.float('Nbr Invoice', readonly=True),
'invoice_am': fields.float('Invoice Amount', readonly=True),
'product_nb': fields.float('Product Nb.', readonly=True),
'disc': fields.float('Disc.', readonly=True),
}
def init(self, cr):
tools.drop_view_if_exists(cr, 'report_transaction_pos')
cr.execute("""
create or replace view report_transaction_pos as (
select
min(absl.id) as id,
count(absl.id) as no_trans,
sum(absl.amount) as amount,
sum((100.0-line.discount) * line.price_unit * line.qty / 100.0) as disc,
to_char(date_trunc('day',absl.create_date),'YYYY-MM-DD')::text as date_create,
po.user_id as user_id,
po.sale_journal as journal_id,
abs.journal_id as jl_id,
count(po.invoice_id) as invoice_id,
count(p.id) as product_nb
from
account_bank_statement_line as absl,
account_bank_statement as abs,
product_product as p,
pos_order_line as line,
pos_order as po
where
absl.pos_statement_id = po.id and
line.order_id=po.id and
line.product_id=p.id and
absl.statement_id=abs.id
group by
po.user_id,po.sale_journal, abs.journal_id,
to_char(date_trunc('day',absl.create_date),'YYYY-MM-DD')::text
)
""")
#to_char(date_trunc('day',absl.create_date),'YYYY-MM-DD')
#to_char(date_trunc('day',absl.create_date),'YYYY-MM-DD')::text as date_create,
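# An illustrative query against the view above (e.g. from psql):
#
#   SELECT date_create, user_id, no_trans, amount, disc
#   FROM report_transaction_pos
#   ORDER BY date_create DESC LIMIT 10;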
class report_sales_by_user_pos(osv.osv):
_name = "report.sales.by.user.pos"
_description = "Sales by user"
_auto = False
_columns = {
'date_order': fields.date('Order Date',required=True, select=True),
'amount': fields.float('Total', readonly=True, select=True),
'qty': fields.float('Quantity', readonly=True, select=True),
'user_id': fields.many2one('res.users', 'User', readonly=True, select=True),
}
def init(self, cr):
tools.drop_view_if_exists(cr, 'report_sales_by_user_pos')
cr.execute("""
create or replace view report_sales_by_user_pos as (
select
min(po.id) as id,
to_char(date_trunc('day',po.date_order),'YYYY-MM-DD')::text as date_order,
po.user_id as user_id,
sum(pol.qty)as qty,
sum((pol.price_unit * pol.qty * (1 - (pol.discount) / 100.0))) as amount
from
pos_order as po,pos_order_line as pol,product_product as pp,product_template as pt
where
pt.id=pp.product_tmpl_id and pp.id=pol.product_id and po.id = pol.order_id
group by
to_char(date_trunc('day',po.date_order),'YYYY-MM-DD')::text,
po.user_id
)
""")
class report_sales_by_user_pos_month(osv.osv):
_name = "report.sales.by.user.pos.month"
_description = "Sales by user monthly"
_auto = False
_columns = {
'date_order': fields.date('Order Date',required=True, select=True),
'amount': fields.float('Total', readonly=True, select=True),
'qty': fields.float('Quantity', readonly=True, select=True),
'user_id': fields.many2one('res.users', 'User', readonly=True, select=True),
}
def init(self, cr):
tools.drop_view_if_exists(cr, 'report_sales_by_user_pos_month')
cr.execute("""
create or replace view report_sales_by_user_pos_month as (
select
min(po.id) as id,
to_char(date_trunc('month',po.date_order),'YYYY-MM-DD')::text as date_order,
po.user_id as user_id,
sum(pol.qty)as qty,
sum((pol.price_unit * pol.qty * (1 - (pol.discount) / 100.0))) as amount
from
pos_order as po,pos_order_line as pol,product_product as pp,product_template as pt
where
pt.id=pp.product_tmpl_id and pp.id=pol.product_id and po.id = pol.order_id
group by
to_char(date_trunc('month',po.date_order),'YYYY-MM-DD')::text,
po.user_id
)
""")
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
caisq/tensorflow | tensorflow/contrib/learn/python/learn/utils/export.py | 14 | 13875 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Export utilities (deprecated).
This module and all its submodules are deprecated. See
[contrib/learn/README.md](https://www.tensorflow.org/code/tensorflow/contrib/learn/README.md)
for migration instructions.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.framework import deprecated
from tensorflow.contrib.session_bundle import exporter
from tensorflow.contrib.session_bundle import gc
from tensorflow.python.client import session as tf_session
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import lookup_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.training import saver as tf_saver
from tensorflow.python.training import training_util
@deprecated('2017-03-25', 'Please use Estimator.export_savedmodel() instead.')
def _get_first_op_from_collection(collection_name):
"""Get first element from the collection."""
elements = ops.get_collection(collection_name)
if elements is not None:
if elements:
return elements[0]
return None
@deprecated('2017-03-25', 'Please use Estimator.export_savedmodel() instead.')
def _get_saver():
"""Lazy init and return saver."""
saver = _get_first_op_from_collection(ops.GraphKeys.SAVERS)
if saver is not None:
if saver:
saver = saver[0]
else:
saver = None
if saver is None and variables.global_variables():
saver = tf_saver.Saver()
ops.add_to_collection(ops.GraphKeys.SAVERS, saver)
return saver
@deprecated('2017-03-25', 'Please use Estimator.export_savedmodel() instead.')
def _export_graph(graph, saver, checkpoint_path, export_dir,
default_graph_signature, named_graph_signatures,
exports_to_keep):
"""Exports graph via session_bundle, by creating a Session."""
with graph.as_default():
with tf_session.Session('') as session:
variables.local_variables_initializer()
lookup_ops.tables_initializer()
saver.restore(session, checkpoint_path)
export = exporter.Exporter(saver)
export.init(
init_op=control_flow_ops.group(
variables.local_variables_initializer(),
lookup_ops.tables_initializer()),
default_graph_signature=default_graph_signature,
named_graph_signatures=named_graph_signatures,
assets_collection=ops.get_collection(ops.GraphKeys.ASSET_FILEPATHS))
return export.export(export_dir, training_util.get_global_step(),
session, exports_to_keep=exports_to_keep)
@deprecated('2017-03-25',
'signature_fns are deprecated. For canned Estimators they are no '
'longer needed. For custom Estimators, please return '
'output_alternatives from your model_fn via ModelFnOps.')
def generic_signature_fn(examples, unused_features, predictions):
"""Creates generic signature from given examples and predictions.
This is needed for backward compatibility with default behavior of
export_estimator.
Args:
examples: `Tensor`.
unused_features: `dict` of `Tensor`s.
predictions: `Tensor` or `dict` of `Tensor`s.
Returns:
Tuple of default signature and empty named signatures.
Raises:
ValueError: If examples is `None`.
"""
if examples is None:
raise ValueError('examples cannot be None when using this signature fn.')
tensors = {'inputs': examples}
if not isinstance(predictions, dict):
predictions = {'outputs': predictions}
tensors.update(predictions)
default_signature = exporter.generic_signature(tensors)
return default_signature, {}
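# A rough usage sketch for these signature fns (names are illustrative; the
# module is deprecated in favor of Estimator.export_savedmodel()):
#
#   examples = array_ops.placeholder(dtypes.string, shape=[None])
#   default_sig, named_sigs = generic_signature_fn(
#       examples, unused_features={}, predictions={'outputs': logits})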
@deprecated('2017-03-25',
'signature_fns are deprecated. For canned Estimators they are no '
'longer needed. For custom Estimators, please return '
'output_alternatives from your model_fn via ModelFnOps.')
def classification_signature_fn(examples, unused_features, predictions):
"""Creates classification signature from given examples and predictions.
Args:
examples: `Tensor`.
unused_features: `dict` of `Tensor`s.
predictions: `Tensor` or dict of tensors that contains the classes tensor
as in {'classes': `Tensor`}.
Returns:
Tuple of default classification signature and empty named signatures.
Raises:
ValueError: If examples is `None`.
"""
if examples is None:
raise ValueError('examples cannot be None when using this signature fn.')
if isinstance(predictions, dict):
default_signature = exporter.classification_signature(
examples, classes_tensor=predictions['classes'])
else:
default_signature = exporter.classification_signature(
examples, classes_tensor=predictions)
return default_signature, {}
@deprecated('2017-03-25',
'signature_fns are deprecated. For canned Estimators they are no '
'longer needed. For custom Estimators, please return '
'output_alternatives from your model_fn via ModelFnOps.')
def classification_signature_fn_with_prob(
examples, unused_features, predictions):
"""Classification signature from given examples and predicted probabilities.
Args:
examples: `Tensor`.
unused_features: `dict` of `Tensor`s.
predictions: `Tensor` of predicted probabilities or dict that contains the
probabilities tensor as in {'probabilities', `Tensor`}.
Returns:
Tuple of default classification signature and empty named signatures.
Raises:
ValueError: If examples is `None`.
"""
if examples is None:
raise ValueError('examples cannot be None when using this signature fn.')
if isinstance(predictions, dict):
default_signature = exporter.classification_signature(
examples, scores_tensor=predictions['probabilities'])
else:
default_signature = exporter.classification_signature(
examples, scores_tensor=predictions)
return default_signature, {}
@deprecated('2017-03-25',
'signature_fns are deprecated. For canned Estimators they are no '
'longer needed. For custom Estimators, please return '
'output_alternatives from your model_fn via ModelFnOps.')
def regression_signature_fn(examples, unused_features, predictions):
"""Creates regression signature from given examples and predictions.
Args:
examples: `Tensor`.
unused_features: `dict` of `Tensor`s.
predictions: `Tensor`.
Returns:
Tuple of default regression signature and empty named signatures.
Raises:
ValueError: If examples is `None`.
"""
if examples is None:
raise ValueError('examples cannot be None when using this signature fn.')
default_signature = exporter.regression_signature(
input_tensor=examples, output_tensor=predictions)
return default_signature, {}
@deprecated('2017-03-25',
'signature_fns are deprecated. For canned Estimators they are no '
'longer needed. For custom Estimators, please return '
'output_alternatives from your model_fn via ModelFnOps.')
def logistic_regression_signature_fn(examples, unused_features, predictions):
"""Creates logistic regression signature from given examples and predictions.
Args:
examples: `Tensor`.
unused_features: `dict` of `Tensor`s.
predictions: `Tensor` of shape [batch_size, 2] of predicted probabilities or
dict that contains the probabilities tensor as in
{'probabilities', `Tensor`}.
Returns:
Tuple of default regression signature and named signature.
Raises:
ValueError: If examples is `None`.
"""
if examples is None:
raise ValueError('examples cannot be None when using this signature fn.')
if isinstance(predictions, dict):
predictions_tensor = predictions['probabilities']
else:
predictions_tensor = predictions
  # predictions should have shape [batch_size, 2] where the first column is
  # P(Y=0|x) and the second column is P(Y=1|x). We are only interested in the
  # second column for inference.
predictions_shape = predictions_tensor.get_shape()
predictions_rank = len(predictions_shape)
if predictions_rank != 2:
logging.fatal(
'Expected predictions to have rank 2, but received predictions with '
'rank: {} and shape: {}'.format(predictions_rank, predictions_shape))
if predictions_shape[1] != 2:
logging.fatal(
'Expected predictions to have 2nd dimension: 2, but received '
'predictions with 2nd dimension: {} and shape: {}. Did you mean to use '
'regression_signature_fn or classification_signature_fn_with_prob '
'instead?'.format(predictions_shape[1], predictions_shape))
positive_predictions = predictions_tensor[:, 1]
default_signature = exporter.regression_signature(
input_tensor=examples, output_tensor=positive_predictions)
return default_signature, {}
# pylint: disable=protected-access
@deprecated('2017-03-25', 'Please use Estimator.export_savedmodel() instead.')
def _default_input_fn(estimator, examples):
"""Creates default input parsing using Estimator's feature signatures."""
return estimator._get_feature_ops_from_example(examples)
@deprecated('2016-09-23', 'Please use Estimator.export_savedmodel() instead.')
def export_estimator(estimator,
export_dir,
signature_fn=None,
input_fn=_default_input_fn,
default_batch_size=1,
exports_to_keep=None):
"""Deprecated, please use Estimator.export_savedmodel()."""
_export_estimator(estimator=estimator,
export_dir=export_dir,
signature_fn=signature_fn,
input_fn=input_fn,
default_batch_size=default_batch_size,
exports_to_keep=exports_to_keep)
@deprecated('2017-03-25', 'Please use Estimator.export_savedmodel() instead.')
def _export_estimator(estimator,
export_dir,
signature_fn,
input_fn,
default_batch_size,
exports_to_keep,
input_feature_key=None,
use_deprecated_input_fn=True,
prediction_key=None,
checkpoint_path=None):
if use_deprecated_input_fn:
input_fn = input_fn or _default_input_fn
elif input_fn is None:
raise ValueError('input_fn must be defined.')
# If checkpoint_path is specified, use the specified checkpoint path.
checkpoint_path = (checkpoint_path or
tf_saver.latest_checkpoint(estimator._model_dir))
with ops.Graph().as_default() as g:
training_util.create_global_step(g)
if use_deprecated_input_fn:
examples = array_ops.placeholder(dtype=dtypes.string,
shape=[default_batch_size],
name='input_example_tensor')
features = input_fn(estimator, examples)
else:
features, _ = input_fn()
examples = None
if input_feature_key is not None:
examples = features.pop(input_feature_key)
if (not features) and (examples is None):
raise ValueError('Either features or examples must be defined.')
predictions = estimator._get_predict_ops(features).predictions
if prediction_key is not None:
predictions = predictions[prediction_key]
# Explicit signature_fn takes priority
if signature_fn:
default_signature, named_graph_signatures = signature_fn(examples,
features,
predictions)
else:
try:
# Some estimators provide a signature function.
# TODO(zakaria): check if the estimator has this function,
# raise helpful error if not
signature_fn = estimator._create_signature_fn()
default_signature, named_graph_signatures = (
signature_fn(examples, features, predictions))
except AttributeError:
logging.warn(
'Change warning: `signature_fn` will be required after'
'2016-08-01.\n'
'Using generic signatures for now. To maintain this behavior, '
'pass:\n'
' signature_fn=export.generic_signature_fn\n'
'Also consider passing a regression or classification signature; '
'see cl/126430915 for an example.')
default_signature, named_graph_signatures = generic_signature_fn(
examples, features, predictions)
if exports_to_keep is not None:
exports_to_keep = gc.largest_export_versions(exports_to_keep)
return _export_graph(
g,
_get_saver(),
checkpoint_path,
export_dir,
default_graph_signature=default_signature,
named_graph_signatures=named_graph_signatures,
exports_to_keep=exports_to_keep)
# pylint: enable=protected-access
| apache-2.0 |
Vishluck/sympy | sympy/polys/tests/test_polyutils.py | 46 | 10845 | """Tests for useful utilities for higher level polynomial classes. """
from sympy import S, Integer, sin, cos, sqrt, symbols, pi, Eq, Integral, exp
from sympy.utilities.pytest import raises
from sympy.polys.polyutils import (
_nsort,
_sort_gens,
_unify_gens,
_analyze_gens,
_sort_factors,
parallel_dict_from_expr,
dict_from_expr,
)
from sympy.polys.polyerrors import (
GeneratorsNeeded,
PolynomialError,
)
from sympy.polys.domains import ZZ
x, y, z, p, q, r, s, t, u, v, w = symbols('x,y,z,p,q,r,s,t,u,v,w')
A, B = symbols('A,B', commutative=False)
def test__nsort():
# issue 6137
r = S('''[3/2 + sqrt(-14/3 - 2*(-415/216 + 13*I/12)**(1/3) - 4/sqrt(-7/3 +
61/(18*(-415/216 + 13*I/12)**(1/3)) + 2*(-415/216 + 13*I/12)**(1/3)) -
61/(18*(-415/216 + 13*I/12)**(1/3)))/2 - sqrt(-7/3 + 61/(18*(-415/216
+ 13*I/12)**(1/3)) + 2*(-415/216 + 13*I/12)**(1/3))/2, 3/2 - sqrt(-7/3
+ 61/(18*(-415/216 + 13*I/12)**(1/3)) + 2*(-415/216 +
13*I/12)**(1/3))/2 - sqrt(-14/3 - 2*(-415/216 + 13*I/12)**(1/3) -
4/sqrt(-7/3 + 61/(18*(-415/216 + 13*I/12)**(1/3)) + 2*(-415/216 +
13*I/12)**(1/3)) - 61/(18*(-415/216 + 13*I/12)**(1/3)))/2, 3/2 +
sqrt(-14/3 - 2*(-415/216 + 13*I/12)**(1/3) + 4/sqrt(-7/3 +
61/(18*(-415/216 + 13*I/12)**(1/3)) + 2*(-415/216 + 13*I/12)**(1/3)) -
61/(18*(-415/216 + 13*I/12)**(1/3)))/2 + sqrt(-7/3 + 61/(18*(-415/216
+ 13*I/12)**(1/3)) + 2*(-415/216 + 13*I/12)**(1/3))/2, 3/2 + sqrt(-7/3
+ 61/(18*(-415/216 + 13*I/12)**(1/3)) + 2*(-415/216 +
13*I/12)**(1/3))/2 - sqrt(-14/3 - 2*(-415/216 + 13*I/12)**(1/3) +
4/sqrt(-7/3 + 61/(18*(-415/216 + 13*I/12)**(1/3)) + 2*(-415/216 +
13*I/12)**(1/3)) - 61/(18*(-415/216 + 13*I/12)**(1/3)))/2]''')
ans = [r[1], r[0], r[-1], r[-2]]
assert _nsort(r) == ans
assert len(_nsort(r, separated=True)[0]) == 0
b, c, a = exp(-1000), exp(-999), exp(-1001)
assert _nsort((b, c, a)) == [a, b, c]
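# _nsort orders numerical roots by (real, imaginary) parts; with
# separated=True it returns the real and complex roots in two lists.
# A tiny illustrative check:
#
#   assert _nsort([S(3), S(1), S(2)]) == [1, 2, 3]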
def test__sort_gens():
assert _sort_gens([]) == ()
assert _sort_gens([x]) == (x,)
assert _sort_gens([p]) == (p,)
assert _sort_gens([q]) == (q,)
assert _sort_gens([x, p]) == (x, p)
assert _sort_gens([p, x]) == (x, p)
assert _sort_gens([q, p]) == (p, q)
assert _sort_gens([q, p, x]) == (x, p, q)
assert _sort_gens([x, p, q], wrt=x) == (x, p, q)
assert _sort_gens([x, p, q], wrt=p) == (p, x, q)
assert _sort_gens([x, p, q], wrt=q) == (q, x, p)
assert _sort_gens([x, p, q], wrt='x') == (x, p, q)
assert _sort_gens([x, p, q], wrt='p') == (p, x, q)
assert _sort_gens([x, p, q], wrt='q') == (q, x, p)
assert _sort_gens([x, p, q], wrt='x,q') == (x, q, p)
assert _sort_gens([x, p, q], wrt='q,x') == (q, x, p)
assert _sort_gens([x, p, q], wrt='p,q') == (p, q, x)
assert _sort_gens([x, p, q], wrt='q,p') == (q, p, x)
assert _sort_gens([x, p, q], wrt='x, q') == (x, q, p)
assert _sort_gens([x, p, q], wrt='q, x') == (q, x, p)
assert _sort_gens([x, p, q], wrt='p, q') == (p, q, x)
assert _sort_gens([x, p, q], wrt='q, p') == (q, p, x)
assert _sort_gens([x, p, q], wrt=[x, 'q']) == (x, q, p)
assert _sort_gens([x, p, q], wrt=[q, 'x']) == (q, x, p)
assert _sort_gens([x, p, q], wrt=[p, 'q']) == (p, q, x)
assert _sort_gens([x, p, q], wrt=[q, 'p']) == (q, p, x)
assert _sort_gens([x, p, q], wrt=['x', 'q']) == (x, q, p)
assert _sort_gens([x, p, q], wrt=['q', 'x']) == (q, x, p)
assert _sort_gens([x, p, q], wrt=['p', 'q']) == (p, q, x)
assert _sort_gens([x, p, q], wrt=['q', 'p']) == (q, p, x)
assert _sort_gens([x, p, q], sort='x > p > q') == (x, p, q)
assert _sort_gens([x, p, q], sort='p > x > q') == (p, x, q)
assert _sort_gens([x, p, q], sort='p > q > x') == (p, q, x)
assert _sort_gens([x, p, q], wrt='x', sort='q > p') == (x, q, p)
assert _sort_gens([x, p, q], wrt='p', sort='q > x') == (p, q, x)
assert _sort_gens([x, p, q], wrt='q', sort='p > x') == (q, p, x)
X = symbols('x0,x1,x2,x10,x11,x12,x20,x21,x22')
assert _sort_gens(X) == X
def test__unify_gens():
assert _unify_gens([], []) == ()
assert _unify_gens([x], [x]) == (x,)
assert _unify_gens([y], [y]) == (y,)
assert _unify_gens([x, y], [x]) == (x, y)
assert _unify_gens([x], [x, y]) == (x, y)
assert _unify_gens([x, y], [x, y]) == (x, y)
assert _unify_gens([y, x], [y, x]) == (y, x)
assert _unify_gens([x], [y]) == (x, y)
assert _unify_gens([y], [x]) == (y, x)
assert _unify_gens([x], [y, x]) == (y, x)
assert _unify_gens([y, x], [x]) == (y, x)
assert _unify_gens([x, y, z], [x, y, z]) == (x, y, z)
assert _unify_gens([z, y, x], [x, y, z]) == (z, y, x)
assert _unify_gens([x, y, z], [z, y, x]) == (x, y, z)
assert _unify_gens([z, y, x], [z, y, x]) == (z, y, x)
assert _unify_gens([x, y, z], [t, x, p, q, z]) == (t, x, y, p, q, z)
def test__analyze_gens():
assert _analyze_gens((x, y, z)) == (x, y, z)
assert _analyze_gens([x, y, z]) == (x, y, z)
assert _analyze_gens(([x, y, z],)) == (x, y, z)
assert _analyze_gens(((x, y, z),)) == (x, y, z)
def test__sort_factors():
assert _sort_factors([], multiple=True) == []
assert _sort_factors([], multiple=False) == []
F = [[1, 2, 3], [1, 2], [1]]
G = [[1], [1, 2], [1, 2, 3]]
assert _sort_factors(F, multiple=False) == G
F = [[1, 2], [1, 2, 3], [1, 2], [1]]
G = [[1], [1, 2], [1, 2], [1, 2, 3]]
assert _sort_factors(F, multiple=False) == G
F = [[2, 2], [1, 2, 3], [1, 2], [1]]
G = [[1], [1, 2], [2, 2], [1, 2, 3]]
assert _sort_factors(F, multiple=False) == G
F = [([1, 2, 3], 1), ([1, 2], 1), ([1], 1)]
G = [([1], 1), ([1, 2], 1), ([1, 2, 3], 1)]
assert _sort_factors(F, multiple=True) == G
F = [([1, 2], 1), ([1, 2, 3], 1), ([1, 2], 1), ([1], 1)]
G = [([1], 1), ([1, 2], 1), ([1, 2], 1), ([1, 2, 3], 1)]
assert _sort_factors(F, multiple=True) == G
F = [([2, 2], 1), ([1, 2, 3], 1), ([1, 2], 1), ([1], 1)]
G = [([1], 1), ([1, 2], 1), ([2, 2], 1), ([1, 2, 3], 1)]
assert _sort_factors(F, multiple=True) == G
F = [([2, 2], 1), ([1, 2, 3], 1), ([1, 2], 2), ([1], 1)]
G = [([1], 1), ([2, 2], 1), ([1, 2], 2), ([1, 2, 3], 1)]
assert _sort_factors(F, multiple=True) == G
def test__dict_from_expr_if_gens():
assert dict_from_expr(
Integer(17), gens=(x,)) == ({(0,): Integer(17)}, (x,))
assert dict_from_expr(
Integer(17), gens=(x, y)) == ({(0, 0): Integer(17)}, (x, y))
assert dict_from_expr(
Integer(17), gens=(x, y, z)) == ({(0, 0, 0): Integer(17)}, (x, y, z))
assert dict_from_expr(
Integer(-17), gens=(x,)) == ({(0,): Integer(-17)}, (x,))
assert dict_from_expr(
Integer(-17), gens=(x, y)) == ({(0, 0): Integer(-17)}, (x, y))
assert dict_from_expr(Integer(
-17), gens=(x, y, z)) == ({(0, 0, 0): Integer(-17)}, (x, y, z))
assert dict_from_expr(
Integer(17)*x, gens=(x,)) == ({(1,): Integer(17)}, (x,))
assert dict_from_expr(
Integer(17)*x, gens=(x, y)) == ({(1, 0): Integer(17)}, (x, y))
assert dict_from_expr(Integer(
17)*x, gens=(x, y, z)) == ({(1, 0, 0): Integer(17)}, (x, y, z))
assert dict_from_expr(
Integer(17)*x**7, gens=(x,)) == ({(7,): Integer(17)}, (x,))
assert dict_from_expr(
Integer(17)*x**7*y, gens=(x, y)) == ({(7, 1): Integer(17)}, (x, y))
assert dict_from_expr(Integer(17)*x**7*y*z**12, gens=(
x, y, z)) == ({(7, 1, 12): Integer(17)}, (x, y, z))
assert dict_from_expr(x + 2*y + 3*z, gens=(x,)) == \
({(1,): Integer(1), (0,): 2*y + 3*z}, (x,))
assert dict_from_expr(x + 2*y + 3*z, gens=(x, y)) == \
({(1, 0): Integer(1), (0, 1): Integer(2), (0, 0): 3*z}, (x, y))
assert dict_from_expr(x + 2*y + 3*z, gens=(x, y, z)) == \
({(1, 0, 0): Integer(
1), (0, 1, 0): Integer(2), (0, 0, 1): Integer(3)}, (x, y, z))
assert dict_from_expr(x*y + 2*x*z + 3*y*z, gens=(x,)) == \
({(1,): y + 2*z, (0,): 3*y*z}, (x,))
assert dict_from_expr(x*y + 2*x*z + 3*y*z, gens=(x, y)) == \
({(1, 1): Integer(1), (1, 0): 2*z, (0, 1): 3*z}, (x, y))
assert dict_from_expr(x*y + 2*x*z + 3*y*z, gens=(x, y, z)) == \
({(1, 1, 0): Integer(
1), (1, 0, 1): Integer(2), (0, 1, 1): Integer(3)}, (x, y, z))
assert dict_from_expr(2**y*x, gens=(x,)) == ({(1,): 2**y}, (x,))
assert dict_from_expr(Integral(x, (x, 1, 2)) + x) == (
{(0, 1): 1, (1, 0): 1}, (x, Integral(x, (x, 1, 2))))
raises(PolynomialError, lambda: dict_from_expr(2**y*x, gens=(x, y)))
def test__dict_from_expr_no_gens():
raises(GeneratorsNeeded, lambda: dict_from_expr(Integer(17)))
assert dict_from_expr(x) == ({(1,): Integer(1)}, (x,))
assert dict_from_expr(y) == ({(1,): Integer(1)}, (y,))
assert dict_from_expr(x*y) == ({(1, 1): Integer(1)}, (x, y))
assert dict_from_expr(
x + y) == ({(1, 0): Integer(1), (0, 1): Integer(1)}, (x, y))
assert dict_from_expr(sqrt(2)) == ({(1,): Integer(1)}, (sqrt(2),))
raises(GeneratorsNeeded, lambda: dict_from_expr(sqrt(2), greedy=False))
assert dict_from_expr(x*y, domain=ZZ[x]) == ({(1,): x}, (y,))
assert dict_from_expr(x*y, domain=ZZ[y]) == ({(1,): y}, (x,))
assert dict_from_expr(3*sqrt(
2)*pi*x*y, extension=None) == ({(1, 1, 1, 1): 3}, (x, y, pi, sqrt(2)))
assert dict_from_expr(3*sqrt(
2)*pi*x*y, extension=True) == ({(1, 1, 1): 3*sqrt(2)}, (x, y, pi))
assert dict_from_expr(3*sqrt(
2)*pi*x*y, extension=True) == ({(1, 1, 1): 3*sqrt(2)}, (x, y, pi))
f = cos(x)*sin(x) + cos(x)*sin(y) + cos(y)*sin(x) + cos(y)*sin(y)
assert dict_from_expr(f) == ({(0, 1, 0, 1): 1, (0, 1, 1, 0): 1,
(1, 0, 0, 1): 1, (1, 0, 1, 0): 1}, (cos(x), cos(y), sin(x), sin(y)))
def test__parallel_dict_from_expr_if_gens():
assert parallel_dict_from_expr([x + 2*y + 3*z, Integer(7)], gens=(x,)) == \
([{(1,): Integer(1), (0,): 2*y + 3*z}, {(0,): Integer(7)}], (x,))
def test__parallel_dict_from_expr_no_gens():
assert parallel_dict_from_expr([x*y, Integer(3)]) == \
([{(1, 1): Integer(1)}, {(0, 0): Integer(3)}], (x, y))
assert parallel_dict_from_expr([x*y, 2*z, Integer(3)]) == \
([{(1, 1, 0): Integer(
1)}, {(0, 0, 1): Integer(2)}, {(0, 0, 0): Integer(3)}], (x, y, z))
def test_parallel_dict_from_expr():
assert parallel_dict_from_expr([Eq(x, 1), Eq(
x**2, 2)]) == ([{(0,): -Integer(1), (1,): Integer(1)},
{(0,): -Integer(2), (2,): Integer(1)}], (x,))
raises(PolynomialError, lambda: parallel_dict_from_expr([A*B - B*A]))
def test_dict_from_expr():
assert dict_from_expr(Eq(x, 1)) == \
({(0,): -Integer(1), (1,): Integer(1)}, (x,))
raises(PolynomialError, lambda: dict_from_expr(A*B - B*A))
| bsd-3-clause |
PhonologicalCorpusTools/SLP-Annotator | slpa/gui/search.py | 1 | 34301 | from imports import (Qt, QDialog, QVBoxLayout, QHBoxLayout, QTabWidget, QPushButton, QFont, QListWidget,
QComboBox, QCheckBox, QTableWidget, QTableWidgetItem, QAbstractItemView, QFrame, QButtonGroup,
QRadioButton, QLineEdit, QMenu, QAction, QCompleter, QStringListModel)
from gui.transcriptions import TranscriptionConfigTab, TranscriptionInfo
from image import *
from constants import GLOBAL_OPTIONS, FINGER_SYMBOLS, SYMBOL_DESCRIPTIONS
FONT_NAME = 'Arial'
FONT_SIZE = 12
class ConfigComboBox(QComboBox):
def __init__(self):
super().__init__()
self.addItem('Config 1')
self.addItem('Config 2')
self.addItem('Either config')
self.addItem('Both configs')
class HandComboBox(QComboBox):
def __init__(self):
super().__init__()
self.addItem('Hand 1')
self.addItem('Hand 2')
self.addItem('Either hand')
self.addItem('Both hands')
class FingerComboBox(QComboBox):
def __init__(self, allowAnyFinger=False):
super().__init__()
self.addItem('Thumb')
self.addItem('Index')
self.addItem('Middle')
self.addItem('Ring')
self.addItem('Pinky')
if allowAnyFinger:
self.addItem('Any')
class FlexionComboBox(QComboBox):
def __init__(self):
super().__init__()
        for symbol in FINGER_SYMBOLS[:-1]:
            #the final description is skipped in this loop and added back below,
            #because the search function benefits from re-ordered options
            self.addItem(SYMBOL_DESCRIPTIONS[symbol].title())
        self.addItem('Extended (any)')
        self.addItem('Flexed (any)')
        self.addItem('Intermediate (any)')
        self.addItem('Unestimatable')  #this was the description skipped earlier
self.addItem('Blank')
self.setMaxVisibleItems(len(FINGER_SYMBOLS)+4)
class QuantifierComboBox(QComboBox):
def __init__(self):
super().__init__()
self.addItem('All')
self.addItem('Any')
self.addItem('None')
class JointComboBox(QComboBox):
def __init__(self):
super().__init__()
self.addItem('Proximal')
self.addItem('Medial')
self.addItem('Distal')
class JointSearchLayout(QHBoxLayout):
def __init__(self):
super().__init__()
self.quantifiers = QuantifierComboBox()
self.joints = JointComboBox()
self.flexions = FlexionComboBox()
self.fingers = FingerComboBox()
self.configs = ConfigComboBox()
self.hands = HandComboBox()
self.addWidget(QLabel('For '))
self.addWidget(self.configs)
self.addWidget(self.hands)
self.addWidget(self.quantifiers)
self.addWidget(QLabel(' of the '))
self.addWidget(self.joints)
self.addWidget(QLabel(' on the '))
self.addWidget(self.fingers)
self.addWidget(QLabel(' are '))
self.addWidget(self.flexions)
class FingerSearchLayout(QHBoxLayout):
def __init__(self, allowAnyFinger=False):
super().__init__()
self.deleteMe = QCheckBox()
self.quantifiers = QuantifierComboBox()
self.fingers = FingerComboBox(allowAnyFinger)
self.flexions = FlexionComboBox()
self.configs = ConfigComboBox()
self.hands = HandComboBox()
self.addWidget(self.deleteMe)
self.addWidget(QLabel('In '))
self.addWidget(self.configs)
self.addWidget(self.hands)
self.addWidget(self.quantifiers)
self.addWidget(QLabel(' of the joints on the '))
self.addWidget(self.fingers)
self.addWidget(QLabel(' are '))
self.addWidget(self.flexions)
def generatePhrase(self):
phrase = list()
for n in range(self.count()):
widget = self.itemAt(n).widget()
if isinstance(widget, QLabel):
phrase.append(widget.text().strip())
elif isinstance(widget, QComboBox):
phrase.append(widget.currentText())
return ' '.join(phrase)
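    # The phrase mirrors the widget order, e.g. (illustrative):
    #   "In Config 1 Hand 1 All of the joints on the Index are Fully Flexed"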
class SearchDialog(QDialog):
def __init__(self):
super().__init__()
self.transcriptions = None
self.regularExpressions = None
def showRecentSearches(self):
dialog = RecentSearchDialog(self.recents)
dialog.exec_()
return dialog.result
def accept(self):
self.accepted = True
#the generate*() functions are implemented by TranscriptionSearchDialog and PhraseSearchDialog
self.generateTranscriptions()
self.generateRegEx()
self.generateGlobalOptions()
super().accept()
def reject(self):
self.accepted = False
super().reject()
class PhraseDialog(QDialog):
def __init__(self):
super().__init__()
self.descriptionLayouts = list()
self.introduction = QLabel()
self.introduction.setFont(QFont('Arial', 15))
#the introduction label is used by subclasses to present different information to the user
self.transcriptions = list()
self.regularExpressions = None
self.layout = QVBoxLayout()
self.layout.addWidget(self.introduction)
self.metaLayout = QVBoxLayout()
self.layout.addLayout(self.metaLayout)
sepFrame = QFrame()
sepFrame.setFrameShape(QFrame.HLine)
sepFrame.setLineWidth(2)
self.layout.addWidget(sepFrame)
self.buttonLayout = QVBoxLayout()
self.topButtonLayout = QHBoxLayout()
self.addDescription = QPushButton('')
self.addDescription.clicked.connect(self.addFingerLayout)
self.topButtonLayout.addWidget(self.addDescription)
remove = QPushButton('Remove all selected phrases')
self.topButtonLayout.addWidget(remove)
remove.clicked.connect(self.removeFingerLayouts)
self.buttonLayout.addLayout(self.topButtonLayout)
bottomButtonLayout = QHBoxLayout()
ok = QPushButton('OK')
bottomButtonLayout.addWidget(ok)
ok.clicked.connect(self.accept)
cancel = QPushButton('Cancel')
bottomButtonLayout.addWidget(cancel)
cancel.clicked.connect(self.reject)
self.buttonLayout.addLayout(bottomButtonLayout)
self.layout.addLayout(self.buttonLayout)
self.setLayout(self.layout)
def clearLayout(self, layout):
if layout is not None:
while layout.count():
item = layout.takeAt(0)
widget = item.widget()
if widget is not None:
widget.deleteLater()
else:
self.clearLayout(item.layout())
def removeFingerLayouts(self):
for n in reversed(range(len(self.descriptionLayouts))):
layout = self.metaLayout.itemAt(n)
if layout.deleteMe.isChecked():
layout = self.metaLayout.takeAt(n)
self.descriptionLayouts.pop(n)
while layout.count():
item = layout.takeAt(0)
widget = item.widget()
if widget is not None:
widget.deleteLater()
def addFingerLayout(self, disable_quantifiers=False, allowAnyFinger=False):
newLayout = FingerSearchLayout(allowAnyFinger)
if disable_quantifiers:
newLayout.quantifiers.removeItem(2)
newLayout.quantifiers.removeItem(1)
newLayout.configs.removeItem(2)
newLayout.hands.removeItem(2)
self.descriptionLayouts.append(newLayout)
self.metaLayout.addLayout(newLayout)
def addJointLayout(self):
newLayout = JointSearchLayout()
self.descriptionLayouts.append(newLayout)
self.metaLayout.addLayout(newLayout)
def findSlotNumbers(self, finger):
if finger == 'thumb':
slots = [4, 5]
elif finger == 'index':
slots = [17,18,19]
elif finger == 'middle':
slots = [22, 23, 24]
elif finger == 'ring':
slots = [27, 28, 29]
elif finger == 'pinky':
slots = [32, 33, 34]
        elif finger == 'any':
            #thumb, index, middle, ring, and pinky slots
            slots = [4, 5, 17, 18, 19, 22, 23, 24, 27, 28, 29, 32, 33, 34]
return slots
def findTranscriptionSymbol(self, description):
description = description.lower()
if description == 'unestimatable':
symbol = '?'
elif description == 'blank':
symbol = ''
elif 'extended' in description:
if 'hyper' in description:
symbol = 'H'
elif 'fully' in description:
symbol = 'E'
elif 'somewhat' in description:
symbol = 'e'
else:
symbol = 'HEe'
elif 'flexed' in description:
if 'fully' in description:
symbol = 'F'
elif 'somewhat' in description:
symbol = 'f'
else:
symbol = 'Ff'
elif 'intermediate' in description:
if 'clearly' in description:
symbol = 'i'
else:
symbol = 'efi'
return symbol
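    # e.g. (following the branches above): 'Extended (any)' -> 'HEe',
    # 'Flexed (any)' -> 'Ff', 'Clearly Intermediate' -> 'i', 'Blank' -> ''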
def generateTranscriptions(self):
transcriptions = list()
for regex in self.regularExpressions:
t = list()
for symbol in regex:
if symbol == '.':
t.append('_')
else:
t.append(symbol)
transcriptions.append(t)
self.transcriptions = transcriptions
def generateGlobalOptions(self):
self.forearm = False
self.estimated = False
self.uncertain = False
self.incomplete = False
self.reduplicated = False
def generatePhrases(self):
self.phrases = [layout.generatePhrase() for layout in self.descriptionLayouts]
def generateRegEx(self):
mapping = {'config1hand1': (0, 'hand1Transcription'),
'config1hand2': (0, 'hand2Transcription'),
'config2hand1': (1, 'hand1Transcription'),
'config2hand2': (1, 'hand2Transcription')}
for layout in self.descriptionLayouts:
transcriptions = {'config1hand1': [None for n in range(34)],
'config1hand2': [None for n in range(34)],
'config2hand1': [None for n in range(34)],
'config2hand2': [None for n in range(34)]}
finger = layout.fingers.currentText().lower()
quantifier = layout.quantifiers.currentText().lower()
config = layout.configs.currentText().lower().replace(' ', '')
hand = layout.hands.currentText().lower().replace(' ', '')
slots = self.findSlotNumbers(layout.fingers.currentText().lower())
symbol = self.findTranscriptionSymbol(layout.flexions.currentText())
configs = ['config1', 'config2'] if config == 'bothconfigs' else [config]
hands = ['hand1', 'hand2'] if hand == 'bothhands' else [hand]
            #build a one-character-wide slot pattern: multi-symbol sets like
            #'Ff' must be character classes, '?' must be escaped, and a blank
            #maps to '_' so the 34-slot alignment is preserved
            if len(symbol) > 1:
                pattern = '[{}]'.format(symbol)
            elif symbol == '?':
                pattern = r'\?'
            elif symbol:
                pattern = symbol
            else:
                pattern = '_'
            if quantifier == 'all':
                symbol = pattern
            elif quantifier == 'any':
                if finger == 'thumb':
                    symbol = '(?:{0}|.(?={0}))'.format(pattern)
                    slots = [slots[0], -1*slots[1]]
                elif finger == 'any':
                    symbol = pattern
                else:
                    symbol = '(?:{0}|.(?={0})|.(?=.{0}))'.format(pattern)
                    slots = [slots[0], -1*slots[1], -1*slots[2]]
                #this new "symbol" acts as a regex that looks ahead 2 or 3 slots,
                #depending on the selected finger; we don't want to put this regex
                #in each of those slots, but rather only in the first one, so the
                #covered slots are marked negative and popped below. wrapping the
                #alternation in (?:...) keeps the '|' from splitting the full
                #34-slot pattern.
            elif quantifier == 'none':
                symbol = '[^{}]'.format(symbol if symbol else '_')
            for c in configs:
                for h in hands:
                    #iterate in ascending order so the negative (highest-index)
                    #slots are popped first and don't shift later targets;
                    #a negative slot -n means "remove template position n-1"
                    for slot in sorted(slots):
                        if slot < 0:
                            transcriptions[c+h].pop(-slot - 1)
                        else:
                            transcriptions[c+h][slot-1] = symbol
for key, value in transcriptions.items():
regex = ['.' if v is None else v for v in value]
transcriptions[key] = regex
self.regularExpressions = [''.join(transcriptions[key]) for key in sorted(list(transcriptions.keys()))]
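        # Illustrative outcome: "in Config 1 Hand 1, any of the joints on the
        # Index are Flexed (any)" places (?:[Ff]|.(?=[Ff])|.(?=.[Ff])) in slot
        # 17 of the config1hand1 template, drops slots 18-19 from it, and
        # leaves the other three templates as 34 wildcards.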
def accept(self):
self.accepted = True
super().accept()
def reject(self):
self.accepted = False
super().reject()
class PhraseSearchDialog(PhraseDialog, SearchDialog):
def __init__(self, corpus, recents):
PhraseDialog.__init__(self)
self.corpus = corpus
self.recents = recents
self.regularExpressions = None
self.transcriptions = list()
self.setWindowTitle('Seach by descriptive phrase')
self.addDescription.setText('Add search description')
self.introduction.setText('Find a handshape with the following properties...')
self.addFingerLayout()
self.regularExpressions = list()
showRecents = QPushButton('Show recent searches...')
showRecents.clicked.connect(self.recentSearches)
self.topButtonLayout.addWidget(showRecents)
def recentSearches(self):
result = self.showRecentSearches()
if result is not None:
results = result.recentData.segmentedTranscription
#iterate through the results and get the relevant parts of each phrase
#each result is a list of strings that, e.g.:
#['in','config','1','hand','2','all','of','the','joints','on','the','index','finger','are','flexed']
for i, result in enumerate(results):
config = ' '.join([result[1], result[2]])
hand = ' '.join([result[3], result[4]])
quantifier = result[5]
finger = result[11]
flexion = result[13]
#this try/except is to check for cases where there are more phrases in the result than there are
#currently displayed on screen. in such a case we need to add a new layout.
try:
layout = self.descriptionLayouts[i]
except IndexError:
layout = FingerSearchLayout()
self.descriptionLayouts.append(layout)
self.metaLayout.addLayout(layout)
index = layout.configs.findText(config)
layout.configs.setCurrentIndex(index)
index = layout.hands.findText(hand)
layout.hands.setCurrentIndex(index)
index = layout.quantifiers.findText(quantifier)
layout.quantifiers.setCurrentIndex(index)
index = layout.fingers.findText(finger)
layout.fingers.setCurrentIndex(index)
index = layout.flexions.findText(flexion)
layout.flexions.setCurrentIndex(index)
        # if the recent search had fewer phrases than the current display,
        # flag the extra layouts for deletion and let removeFingerLayouts()
        # drop them from both the meta layout and self.descriptionLayouts
        if len(results) < len(self.descriptionLayouts):
            for n in range(self.metaLayout.count()):
                layout = self.metaLayout.itemAt(n)
                if n + 1 <= len(results):
                    layout.deleteMe.setChecked(False)
                else:
                    layout.deleteMe.setChecked(True)
            self.removeFingerLayouts()
def accept(self):
self.generateRegEx()
self.generatePhrases()
super().accept()
class AutoFillDialog(PhraseDialog):
def __init__(self):
super().__init__()
self.setWindowTitle('Autofill')
self.addDescription.setText('Add autofill operation')
self.introduction.setText('Fill in the current transcription so that...')
self.addFingerLayout()
def accept(self):
self.generateTranscriptions()
super().accept()
def addFingerLayout(self):
super().addFingerLayout(disable_quantifiers=True)
def generateTranscriptions(self):
transcriptions = {'config1hand1': [None for n in range(34)],
'config1hand2': [None for n in range(34)],
'config2hand1': [None for n in range(34)],
'config2hand2': [None for n in range(34)]}
for layout in self.descriptionLayouts:
quantifier = layout.quantifiers.currentText().lower()
config = layout.configs.currentText().lower().replace(' ', '')
hand = layout.hands.currentText().lower().replace(' ', '')
slots = self.findSlotNumbers(layout.fingers.currentText().lower())
symbol = self.findTranscriptionSymbol(layout.flexions.currentText())
configs = ['config1', 'config2'] if config == 'bothconfigs' else [config]
hands = ['hand1', 'hand2'] if hand == 'bothhands' else [hand]
for c in configs:
for h in hands:
for slot in slots:
transcriptions[c+h][slot-1] = symbol
self.transcriptions = transcriptions
class TranscriptionsSearchOptionsDialog(QDialog):
def __init__(self, blankOptionSelection = None, wildcard = None):
super().__init__()
self.blankOptionSelection = blankOptionSelection
self.wildcard = wildcard
self.setWindowTitle('Search Options')
layout = QVBoxLayout()
blankOptionsLabel = QLabel('How should blank spaces be interpreted in your search?')
layout.addWidget(blankOptionsLabel)
blankOptionsLayout = QVBoxLayout()
self.blankOptionsGroup = QButtonGroup()
asBlankOption = QRadioButton('Interpret as literal blanks, and only match blank slots')
blankOptionsLayout.addWidget(asBlankOption)
self.blankOptionsGroup.addButton(asBlankOption)
self.blankOptionsGroup.setId(asBlankOption, 0)
if self.blankOptionSelection == 'literal':
asBlankOption.setChecked(True)
asWildcardOption = QRadioButton('Interpret as wildcards, and match anything')
blankOptionsLayout.addWidget(asWildcardOption)
self.blankOptionsGroup.addButton(asWildcardOption)
self.blankOptionsGroup.setId(asWildcardOption, 1)
if self.blankOptionSelection == 'wildcard' or self.blankOptionSelection is None:
asWildcardOption.setChecked(True)
miniLayout = QHBoxLayout()
asBlankWithWildcard = QRadioButton('Interpret as literal blanks, and use this character for wildcards: ')
self.wildcardLineEdit = QLineEdit()
self.wildcardLineEdit.setMaxLength(1)
self.wildcardLineEdit.setMaximumWidth(30)
self.blankOptionsGroup.addButton(asBlankWithWildcard)
self.blankOptionsGroup.setId(asBlankWithWildcard, 2)
if self.blankOptionSelection == 'both':
asBlankWithWildcard.setChecked(True)
self.wildcardLineEdit.setText(self.wildcard)
miniLayout.addWidget(asBlankWithWildcard)
miniLayout.addWidget(self.wildcardLineEdit)
blankOptionsLayout.addLayout(miniLayout)
layout.addLayout(blankOptionsLayout)
buttonLayout = QHBoxLayout()
ok = QPushButton('OK')
ok.clicked.connect(self.accept)
buttonLayout.addWidget(ok)
cancel = QPushButton('Cancel')
cancel.clicked.connect(self.reject)
buttonLayout.addWidget(cancel)
layout.addLayout(buttonLayout)
self.setLayout(layout)
def accept(self):
selectedButton = self.blankOptionsGroup.checkedButton()
id_ = self.blankOptionsGroup.id(selectedButton)
if id_ == 0:
self.blankOptionSelection = 'literal'
self.wildcard = None
elif id_ == 1:
self.blankOptionSelection = 'wildcard'
self.wildcard = '_'
elif id_ == 2:
self.blankOptionSelection = 'both'
self.wildcard = self.wildcardLineEdit.text()
super().accept()
class GlossSearchDialog(QDialog):
def __init__(self, corpus):
super().__init__()
self.setWindowTitle('Search by gloss')
layout = QVBoxLayout()
searchLayout = QHBoxLayout()
searchLabel = QLabel('Enter gloss to search for: ')
searchLayout.addWidget(searchLabel)
self.searchEdit = QLineEdit()
completer = QCompleter()
model = QStringListModel()
model.setStringList([word.gloss for word in corpus])
completer.setModel(model)
completer.setCaseSensitivity(Qt.CaseInsensitive)
self.searchEdit.setCompleter(completer)
searchLayout.addWidget(self.searchEdit)
buttonLayout = QHBoxLayout()
ok = QPushButton('OK')
ok.clicked.connect(self.accept)
buttonLayout.addWidget(ok)
cancel = QPushButton('Cancel')
cancel.clicked.connect(self.reject)
buttonLayout.addWidget(cancel)
layout.addLayout(searchLayout)
layout.addLayout(buttonLayout)
self.setLayout(layout)
def reject(self):
self.accepted = False
super().reject()
def accept(self):
self.accepted = True
self.searchWord = self.searchEdit.text()
super().accept()
class TranscriptionSearchDialog(SearchDialog):
def __init__(self, corpus, recents, blankValue, wildcard):
super().__init__()
print('recents', recents)
print('blankValue', blankValue)
print('wildcard', wildcard)
self.corpus = corpus
self.recents = recents
self.blankValue = blankValue
self.wildcard = wildcard
self.setWindowTitle('Search')
self.setWindowFlags(Qt.WindowMaximizeButtonHint | Qt.WindowMinimizeButtonHint | Qt.WindowCloseButtonHint)
layout = QVBoxLayout()
#Set up up top layout
self.topLayout = QHBoxLayout()
explanation = QLabel()
text = ('Enter the transcription you want to match in your corpus.')
explanation.setText(text)
explanation.setFont(QFont('Arial', 16))
self.topLayout.addWidget(explanation)
layout.addLayout(self.topLayout)
#Set up config tabs
self.configTabs = QTabWidget()
self.configTabs.addTab(TranscriptionConfigTab(1), 'Config 1')
self.configTabs.addTab(TranscriptionConfigTab(2), 'Config 2')
layout.addWidget(self.configTabs)
# Add "global" handshape options (as checkboxes)
self.globalOptionsLayout = QHBoxLayout()
self.setupGlobalOptions()
layout.addLayout(self.globalOptionsLayout)
#Add hand image
self.infoPanel = QHBoxLayout()
self.handImage = HandShapeImage(getMediaFilePath('hand.JPG'))
self.infoPanel.addWidget(self.handImage)
self.transcriptionInfo = TranscriptionInfo()
self.infoPanel.addLayout(self.transcriptionInfo)
layout.addLayout(self.infoPanel)
#Connects some slots and signals
for k in [0,1]:
for slot in self.configTabs.widget(k).hand1Transcription.slots[1:]:
slot.slotSelectionChanged.connect(self.handImage.useNormalImage)
slot.slotSelectionChanged.connect(self.handImage.transcriptionSlotChanged)
slot.slotSelectionChanged.connect(self.transcriptionInfo.transcriptionSlotChanged)
slot.changeValidatorState(True)
for slot in self.configTabs.widget(k).hand2Transcription.slots[1:]:
slot.slotSelectionChanged.connect(self.handImage.useReverseImage)
slot.slotSelectionChanged.connect(self.handImage.transcriptionSlotChanged)
slot.slotSelectionChanged.connect(self.transcriptionInfo.transcriptionSlotChanged)
slot.changeValidatorState(True)
buttonLayout = QHBoxLayout()
blankOptionsButton = QPushButton('Search options...')
blankOptionsButton.clicked.connect(self.showSearchOptions)
buttonLayout.addWidget(blankOptionsButton)
showRecents = QPushButton('Show recent searches...')
showRecents.clicked.connect(self.recentSearches)
ok = QPushButton('Search')
ok.clicked.connect(self.accept)
cancel = QPushButton('Cancel')
cancel.clicked.connect(self.reject)
buttonLayout.addWidget(showRecents)
buttonLayout.addWidget(ok)
buttonLayout.addWidget(cancel)
layout.addLayout(buttonLayout)
self.setLayout(layout)
self.showMaximized()
def showSearchOptions(self):
dialog = TranscriptionsSearchOptionsDialog(self.blankValue, self.wildcard)
if dialog.exec_():
self.blankValue = dialog.blankOptionSelection
self.wildcard = dialog.wildcard
def recentSearches(self):
result = self.showRecentSearches()
if result is not None:
results = result.recentData.segmentedTranscription
results = [[results[0], results[1]], [results[2], results[3]]]
for config in [0,1]:
for hand in [0,1]:
widget = getattr(self.configTabs.widget(config), 'hand{}Transcription'.format(hand+1))
for n,symbol in enumerate(results[config][hand]):
slot = getattr(widget, 'slot{}'.format(n+1))
slot.setText(symbol)
def setupGlobalOptions(self):
self.globalOptionsWidgets = list()
globalOptionsLabel = QLabel('Global handshape options:')
globalOptionsLabel.setFont(QFont(FONT_NAME, FONT_SIZE))
self.globalOptionsLayout.addWidget(globalOptionsLabel)
for option in GLOBAL_OPTIONS:
widget = QCheckBox(option.title())
option += 'CheckBox'
setattr(self, option, widget)
widget = getattr(self, option)
self.globalOptionsLayout.addWidget(widget)
self.globalOptionsWidgets.append(widget)
def generateRegEx(self):
expressions = list()
for transcription in self.transcriptions:
regex = list()
for slot in transcription.slots:
symbol = slot.text()
if not symbol or symbol == ' ':
if self.blankValue == 'literal' or self.blankValue == 'both':
symbol = '_'
elif self.blankValue == 'wildcard':
symbol = '.'
if symbol == self.wildcard:
symbol = '.'
if symbol in ['?', '*', '$', '^', '+']:
symbol = '\\'+symbol
regex.append(symbol)
regex = ''.join(regex)
expressions.append(regex)
self.regularExpressions = expressions
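        # e.g. with blankValue == 'both' and wildcard '%' (illustrative), an
        # empty slot becomes the literal '_' while a typed '%' becomes '.',
        # and regex metacharacters such as '?' are escaped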
def generateTranscriptions(self):
self.transcriptions = list()
self.transcriptions.append(self.configTabs.widget(0).hand1Transcription)
self.transcriptions.append(self.configTabs.widget(0).hand2Transcription)
self.transcriptions.append(self.configTabs.widget(1).hand1Transcription)
self.transcriptions.append(self.configTabs.widget(1).hand2Transcription)
def generateGlobalOptions(self):
for option in GLOBAL_OPTIONS:
setattr(self, option, getattr(self, option+'CheckBox').isChecked())
# self.forearm = self.forearmCheckBox.isChecked()
# self.estimated = self.estimatedCheckBox.isChecked()
# self.uncertain = self.uncertainCheckBox.isChecked()
# self.incomplete = self.incompleteCheckBox.isChecked()
# self.reduplicated = self.reduplicatedCheckBox.isChecked()
class RecentSearch:
def __init__(self, transcriptions, regex, results):
try:
#assume that this is a list of Transcription objects
top = ','.join([t.str_with_underscores() for t in transcriptions[0:2]])
            bottom = ','.join([t.str_with_underscores() for t in transcriptions[2:]])
self.segmentedTranscription = [[slot.getText(empty_text='') for slot in transcription] for transcription in
transcriptions]
self.transcriptions = '\n'.join([top, bottom])
except AttributeError:
#if that fails, then it might be a descriptive phrase
if transcriptions[0].startswith('In Config'):
self.segmentedTranscription = [transcription.split(' ') for transcription in transcriptions]
self.transcriptions = '\n'.join(transcriptions)
#and if that's not the case, then it must be a list of transcriptions as strings
else:
top = ','.join([''.join(t) for t in transcriptions[0:2]])
                bottom = ','.join([''.join(t) for t in transcriptions[2:]])
self.segmentedTranscription = [[slot for slot in transcription] for transcription in transcriptions]
self.transcriptions = '\n'.join([top, bottom])
self.regularExpression = regex
self.results = ', '.join([r.gloss for r in results])
def __str__(self):
return self.transcriptions
class RecentSearchItem(QTableWidgetItem):
def __init__(self, recentData, textType, menuText):
super().__init__()
self.recentData = recentData
self.setText(getattr(self.recentData, textType))
#self.makeMenu(menuText)
def makeMenu(self, text):
self.setContextMenuPolicy(Qt.CustomContextMenu)
self.customContextMenuRequested.connect(self.showContextMenu)
self.popMenu = QMenu(self)
if 'Add' in text:
function = self.addToFavourites
elif 'Remove' in text:
function = self.removeFromFavourites
action = QAction(text, self, triggered = function)
self.popMenu.addAction(action)
def showContextMenu(self, point):
self.popMenu.exec_(self.mapToGlobal(point))
def addToFavourites(self):
pass
def removeFromFavourites(self):
pass
class RecentSearchTable(QTableWidget):
def __init__(self, searches, menuText):
super().__init__()
self.menuText = menuText
self.setupTable(searches)
def setupTable(self, searches):
self.setColumnCount(2)
self.setRowCount(len(searches))
self.setHorizontalHeaderLabels(['Search', 'Results'])
for row, recent in enumerate(searches):
self.setItem(row, 0, RecentSearchItem(recent, 'transcriptions', self.menuText))
self.setItem(row, 1, RecentSearchItem(recent, 'results', self.menuText))
self.setSelectionBehavior(QAbstractItemView.SelectRows)
self.resizeColumnToContents(0)
class RecentSearchDialog(QDialog):
def __init__(self, recents, favourites = None):
if favourites is None:
favourites = list()
super().__init__()
self.setWindowTitle('Recent Searches')
self.result = None
layout = QVBoxLayout()
tableLayout = QHBoxLayout()
self.recentTable = RecentSearchTable(recents, 'Add to favourites')
tableLayout.addWidget(self.recentTable)
self.favouriteTable = RecentSearchTable(favourites, 'Remove from favourites')
tableLayout.addWidget(self.favouriteTable)
buttonLayout = QHBoxLayout()
ok = QPushButton('Use selected search')
ok.clicked.connect(self.accept)
cancel = QPushButton('Cancel')
cancel.clicked.connect(self.reject)
buttonLayout.addWidget(ok)
buttonLayout.addWidget(cancel)
layout.addLayout(tableLayout)
layout.addLayout(buttonLayout)
self.setLayout(layout)
        self.resize(int(self.recentTable.width()*1.5), self.height())
def addToFavourites(self):
pass
def removeFromFavourites(self):
pass
    def setupTable(self, table, searches, menuText):
        table.setColumnCount(2)
        table.setRowCount(len(searches))
        table.setHorizontalHeaderLabels(['Search', 'Results'])
        for row, recent in enumerate(searches):
            table.setItem(row, 0, RecentSearchItem(recent, 'transcriptions', menuText))
            table.setItem(row, 1, RecentSearchItem(recent, 'results', menuText))
table.setSelectionBehavior(QAbstractItemView.SelectRows)
table.resizeColumnToContents(0)
def makeMenu(self, table, text, function):
table.setContextMenuPolicy(Qt.CustomContextMenu)
table.customContextMenuRequested.connect(self.showContextMenu)
table.popMenu = QMenu(self)
action = QAction(text, self, triggered = function)
table.popMenu.addAction(action)
def showContextMenu(self, table, point):
table.popMenu.exec_(self.mapToGlobal(point))
def accept(self):
row = self.recentTable.currentRow()
self.result = self.recentTable.item(row, 0)
super().accept()
def reject(self):
self.result = None
super().reject()
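# Minimal caller-side wiring (a sketch; names hypothetical):
#     dialog = RecentSearchDialog(recents, favourites)
#     if dialog.exec_():
#         chosen = dialog.result  # a RecentSearchItem, or None after Cancel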
class SearchResultsDialog(QDialog):
def __init__(self, results):
super().__init__()
self.setWindowTitle('Search Results')
layout = QVBoxLayout()
self.result = None
resultsLayout = QHBoxLayout()
self.resultsList = QListWidget()
for r in results:
self.resultsList.addItem(r.gloss)
resultsLayout.addWidget(self.resultsList)
layout.addLayout(resultsLayout)
buttonLayout = QHBoxLayout()
okButton = QPushButton('Go to this entry')
cancelButton = QPushButton('Cancel')
okButton.clicked.connect(self.accept)
cancelButton.clicked.connect(self.reject)
buttonLayout.addWidget(okButton)
buttonLayout.addWidget(cancelButton)
layout.addLayout(buttonLayout)
self.setLayout(layout)
def accept(self):
item = self.resultsList.currentItem()
self.result = item
super().accept()
def reject(self):
self.result = None
super().reject() | gpl-3.0 |
farodin91/servo | tests/wpt/css-tests/tools/pywebsocket/src/example/origin_check_wsh.py | 516 | 1992 | # Copyright 2011, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# This example is derived from test/testdata/handlers/origin_check_wsh.py.
def web_socket_do_extra_handshake(request):
if request.ws_origin == 'http://example.com':
return
raise ValueError('Unacceptable origin: %r' % request.ws_origin)
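# Illustrative effect (sketch): a handshake whose Origin header is exactly
# 'http://example.com' proceeds; any other value -- including None when the
# header is absent -- raises, and pywebsocket aborts the handshake.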
def web_socket_transfer_data(request):
request.connection.write('origin_check_wsh.py is called for %s, %s' %
(request.ws_resource, request.ws_protocol))
# vi:sts=4 sw=4 et
| mpl-2.0 |
hellodata/hellodate | 2/site-packages/django/db/backends/oracle/creation.py | 34 | 14625 | import sys
import time
from django.conf import settings
from django.db.backends.creation import BaseDatabaseCreation
from django.db.utils import DatabaseError
from django.utils.six.moves import input
TEST_DATABASE_PREFIX = 'test_'
PASSWORD = 'Im_a_lumberjack'
class DatabaseCreation(BaseDatabaseCreation):
# This dictionary maps Field objects to their associated Oracle column
# types, as strings. Column-type strings can contain format strings; they'll
# be interpolated against the values of Field.__dict__ before being output.
# If a column type is set to None, it won't be included in the output.
#
# Any format strings starting with "qn_" are quoted before being used in the
    # output (the "qn_" prefix is stripped before the lookup is performed).
data_types = {
'AutoField': 'NUMBER(11)',
'BinaryField': 'BLOB',
'BooleanField': 'NUMBER(1)',
'CharField': 'NVARCHAR2(%(max_length)s)',
'CommaSeparatedIntegerField': 'VARCHAR2(%(max_length)s)',
'DateField': 'DATE',
'DateTimeField': 'TIMESTAMP',
'DecimalField': 'NUMBER(%(max_digits)s, %(decimal_places)s)',
'FileField': 'NVARCHAR2(%(max_length)s)',
'FilePathField': 'NVARCHAR2(%(max_length)s)',
'FloatField': 'DOUBLE PRECISION',
'IntegerField': 'NUMBER(11)',
'BigIntegerField': 'NUMBER(19)',
'IPAddressField': 'VARCHAR2(15)',
'GenericIPAddressField': 'VARCHAR2(39)',
'NullBooleanField': 'NUMBER(1)',
'OneToOneField': 'NUMBER(11)',
'PositiveIntegerField': 'NUMBER(11)',
'PositiveSmallIntegerField': 'NUMBER(11)',
'SlugField': 'NVARCHAR2(%(max_length)s)',
'SmallIntegerField': 'NUMBER(11)',
'TextField': 'NCLOB',
'TimeField': 'TIMESTAMP',
'URLField': 'VARCHAR2(%(max_length)s)',
}
data_type_check_constraints = {
'BooleanField': '%(qn_column)s IN (0,1)',
'NullBooleanField': '(%(qn_column)s IN (0,1)) OR (%(qn_column)s IS NULL)',
'PositiveIntegerField': '%(qn_column)s >= 0',
'PositiveSmallIntegerField': '%(qn_column)s >= 0',
}
def __init__(self, connection):
super(DatabaseCreation, self).__init__(connection)
def _create_test_db(self, verbosity=1, autoclobber=False):
TEST_NAME = self._test_database_name()
TEST_USER = self._test_database_user()
TEST_PASSWD = self._test_database_passwd()
TEST_TBLSPACE = self._test_database_tblspace()
TEST_TBLSPACE_TMP = self._test_database_tblspace_tmp()
parameters = {
'dbname': TEST_NAME,
'user': TEST_USER,
'password': TEST_PASSWD,
'tblspace': TEST_TBLSPACE,
'tblspace_temp': TEST_TBLSPACE_TMP,
}
cursor = self.connection.cursor()
if self._test_database_create():
try:
self._execute_test_db_creation(cursor, parameters, verbosity)
except Exception as e:
sys.stderr.write("Got an error creating the test database: %s\n" % e)
if not autoclobber:
confirm = input("It appears the test database, %s, already exists. Type 'yes' to delete it, or 'no' to cancel: " % TEST_NAME)
if autoclobber or confirm == 'yes':
if verbosity >= 1:
print("Destroying old test database '%s'..." % self.connection.alias)
try:
self._execute_test_db_destruction(cursor, parameters, verbosity)
except DatabaseError as e:
if 'ORA-29857' in str(e):
self._handle_objects_preventing_db_destruction(cursor, parameters,
verbosity, autoclobber)
else:
# Ran into a database error that isn't about leftover objects in the tablespace
sys.stderr.write("Got an error destroying the old test database: %s\n" % e)
sys.exit(2)
except Exception as e:
sys.stderr.write("Got an error destroying the old test database: %s\n" % e)
sys.exit(2)
try:
self._execute_test_db_creation(cursor, parameters, verbosity)
except Exception as e:
sys.stderr.write("Got an error recreating the test database: %s\n" % e)
sys.exit(2)
else:
print("Tests cancelled.")
sys.exit(1)
if self._test_user_create():
if verbosity >= 1:
print("Creating test user...")
try:
self._create_test_user(cursor, parameters, verbosity)
except Exception as e:
sys.stderr.write("Got an error creating the test user: %s\n" % e)
if not autoclobber:
confirm = input("It appears the test user, %s, already exists. Type 'yes' to delete it, or 'no' to cancel: " % TEST_USER)
if autoclobber or confirm == 'yes':
try:
if verbosity >= 1:
print("Destroying old test user...")
self._destroy_test_user(cursor, parameters, verbosity)
if verbosity >= 1:
print("Creating test user...")
self._create_test_user(cursor, parameters, verbosity)
except Exception as e:
sys.stderr.write("Got an error recreating the test user: %s\n" % e)
sys.exit(2)
else:
print("Tests cancelled.")
sys.exit(1)
self.connection.close() # done with main user -- test user and tablespaces created
real_settings = settings.DATABASES[self.connection.alias]
real_settings['SAVED_USER'] = self.connection.settings_dict['SAVED_USER'] = self.connection.settings_dict['USER']
real_settings['SAVED_PASSWORD'] = self.connection.settings_dict['SAVED_PASSWORD'] = self.connection.settings_dict['PASSWORD']
real_test_settings = real_settings['TEST']
test_settings = self.connection.settings_dict['TEST']
real_test_settings['USER'] = real_settings['USER'] = test_settings['USER'] = self.connection.settings_dict['USER'] = TEST_USER
real_settings['PASSWORD'] = self.connection.settings_dict['PASSWORD'] = TEST_PASSWD
return self.connection.settings_dict['NAME']
def _handle_objects_preventing_db_destruction(self, cursor, parameters, verbosity, autoclobber):
# There are objects in the test tablespace which prevent dropping it
# The easy fix is to drop the test user -- but are we allowed to do so?
print("There are objects in the old test database which prevent its destruction.")
print("If they belong to the test user, deleting the user will allow the test "
"database to be recreated.")
print("Otherwise, you will need to find and remove each of these objects, "
"or use a different tablespace.\n")
if self._test_user_create():
if not autoclobber:
confirm = input("Type 'yes' to delete user %s: " % parameters['user'])
if autoclobber or confirm == 'yes':
try:
if verbosity >= 1:
print("Destroying old test user...")
self._destroy_test_user(cursor, parameters, verbosity)
except Exception as e:
sys.stderr.write("Got an error destroying the test user: %s\n" % e)
sys.exit(2)
try:
if verbosity >= 1:
print("Destroying old test database '%s'..." % self.connection.alias)
self._execute_test_db_destruction(cursor, parameters, verbosity)
except Exception as e:
sys.stderr.write("Got an error destroying the test database: %s\n" % e)
sys.exit(2)
else:
print("Tests cancelled -- test database cannot be recreated.")
sys.exit(1)
else:
print("Django is configured to use pre-existing test user '%s',"
" and will not attempt to delete it.\n" % parameters['user'])
print("Tests cancelled -- test database cannot be recreated.")
sys.exit(1)
def _destroy_test_db(self, test_database_name, verbosity=1):
"""
        Destroy a test database, dropping the test user and the test
        tablespaces that were created for it.
"""
TEST_NAME = self._test_database_name()
TEST_USER = self._test_database_user()
TEST_PASSWD = self._test_database_passwd()
TEST_TBLSPACE = self._test_database_tblspace()
TEST_TBLSPACE_TMP = self._test_database_tblspace_tmp()
self.connection.settings_dict['USER'] = self.connection.settings_dict['SAVED_USER']
self.connection.settings_dict['PASSWORD'] = self.connection.settings_dict['SAVED_PASSWORD']
parameters = {
'dbname': TEST_NAME,
'user': TEST_USER,
'password': TEST_PASSWD,
'tblspace': TEST_TBLSPACE,
'tblspace_temp': TEST_TBLSPACE_TMP,
}
cursor = self.connection.cursor()
time.sleep(1) # To avoid "database is being accessed by other users" errors.
if self._test_user_create():
if verbosity >= 1:
print('Destroying test user...')
self._destroy_test_user(cursor, parameters, verbosity)
if self._test_database_create():
if verbosity >= 1:
print('Destroying test database tables...')
self._execute_test_db_destruction(cursor, parameters, verbosity)
self.connection.close()
def _execute_test_db_creation(self, cursor, parameters, verbosity):
if verbosity >= 2:
print("_create_test_db(): dbname = %s" % parameters['dbname'])
statements = [
"""CREATE TABLESPACE %(tblspace)s
DATAFILE '%(tblspace)s.dbf' SIZE 20M
REUSE AUTOEXTEND ON NEXT 10M MAXSIZE 300M
""",
"""CREATE TEMPORARY TABLESPACE %(tblspace_temp)s
TEMPFILE '%(tblspace_temp)s.dbf' SIZE 20M
REUSE AUTOEXTEND ON NEXT 10M MAXSIZE 150M
""",
]
self._execute_statements(cursor, statements, parameters, verbosity)
def _create_test_user(self, cursor, parameters, verbosity):
if verbosity >= 2:
print("_create_test_user(): username = %s" % parameters['user'])
statements = [
"""CREATE USER %(user)s
IDENTIFIED BY %(password)s
DEFAULT TABLESPACE %(tblspace)s
TEMPORARY TABLESPACE %(tblspace_temp)s
QUOTA UNLIMITED ON %(tblspace)s
""",
"""GRANT CONNECT, RESOURCE TO %(user)s""",
]
self._execute_statements(cursor, statements, parameters, verbosity)
def _execute_test_db_destruction(self, cursor, parameters, verbosity):
if verbosity >= 2:
print("_execute_test_db_destruction(): dbname=%s" % parameters['dbname'])
statements = [
'DROP TABLESPACE %(tblspace)s INCLUDING CONTENTS AND DATAFILES CASCADE CONSTRAINTS',
'DROP TABLESPACE %(tblspace_temp)s INCLUDING CONTENTS AND DATAFILES CASCADE CONSTRAINTS',
]
self._execute_statements(cursor, statements, parameters, verbosity)
def _destroy_test_user(self, cursor, parameters, verbosity):
if verbosity >= 2:
print("_destroy_test_user(): user=%s" % parameters['user'])
print("Be patient. This can take some time...")
statements = [
'DROP USER %(user)s CASCADE',
]
self._execute_statements(cursor, statements, parameters, verbosity)
def _execute_statements(self, cursor, statements, parameters, verbosity):
for template in statements:
stmt = template % parameters
if verbosity >= 2:
print(stmt)
try:
cursor.execute(stmt)
except Exception as err:
sys.stderr.write("Failed (%s)\n" % (err))
raise
def _test_settings_get(self, key, default=None, prefixed=None):
"""
Return a value from the test settings dict,
or a given default,
or a prefixed entry from the main settings dict
"""
settings_dict = self.connection.settings_dict
val = settings_dict['TEST'].get(key, default)
if val is None:
val = TEST_DATABASE_PREFIX + settings_dict[prefixed]
return val
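    # Illustrative fallback (values assumed): with NAME='prod' and an empty
    # TEST dict, _test_database_name() below resolves to 'test_prod'.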
def _test_database_name(self):
return self._test_settings_get('NAME', prefixed='NAME')
def _test_database_create(self):
return self._test_settings_get('CREATE_DB', default=True)
def _test_user_create(self):
return self._test_settings_get('CREATE_USER', default=True)
def _test_database_user(self):
return self._test_settings_get('USER', prefixed='USER')
def _test_database_passwd(self):
return self._test_settings_get('PASSWORD', default=PASSWORD)
def _test_database_tblspace(self):
return self._test_settings_get('TBLSPACE', prefixed='NAME')
def _test_database_tblspace_tmp(self):
settings_dict = self.connection.settings_dict
return settings_dict['TEST'].get('TBLSPACE_TMP',
TEST_DATABASE_PREFIX + settings_dict['NAME'] + '_temp')
def _get_test_db_name(self):
"""
We need to return the 'production' DB name to get the test DB creation
        machinery to work. This isn't a big deal in this case because DB
        names as handled by Django don't have real counterparts in Oracle.
"""
return self.connection.settings_dict['NAME']
def test_db_signature(self):
settings_dict = self.connection.settings_dict
return (
settings_dict['HOST'],
settings_dict['PORT'],
settings_dict['ENGINE'],
settings_dict['NAME'],
self._test_database_user(),
)
| lgpl-3.0 |
rbaerzib/xtreemfs | cpp/thirdparty/gtest-1.7.0/test/gtest_filter_unittest.py | 2826 | 21261 | #!/usr/bin/env python
#
# Copyright 2005 Google Inc. All Rights Reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Unit test for Google Test test filters.
A user can specify which test(s) in a Google Test program to run via either
the GTEST_FILTER environment variable or the --gtest_filter flag.
This script tests such functionality by invoking
gtest_filter_unittest_ (a program written with Google Test) with different
environments and command line flags.
Note that test sharding may also influence which tests are filtered. Therefore,
we test that here also.
"""
__author__ = '[email protected] (Zhanyong Wan)'
import os
import re
import sets
import sys
import gtest_test_utils
# Constants.
# Checks if this platform can pass empty environment variables to child
# processes. We set an env variable to an empty string and invoke a python
# script in a subprocess to print whether the variable is STILL in
# os.environ. We then use 'eval' to parse the child's output so that an
# exception is thrown if the input is anything other than 'True' or 'False'.
os.environ['EMPTY_VAR'] = ''
child = gtest_test_utils.Subprocess(
[sys.executable, '-c', 'import os; print \'EMPTY_VAR\' in os.environ'])
CAN_PASS_EMPTY_ENV = eval(child.output)
# Check if this platform can unset environment variables in child processes.
# We set an env variable to a non-empty string, unset it, and invoke
# a python script in a subprocess to print whether the variable
# is NO LONGER in os.environ.
# We use 'eval' to parse the child's output so that an exception
# is thrown if the input is neither 'True' nor 'False'.
os.environ['UNSET_VAR'] = 'X'
del os.environ['UNSET_VAR']
child = gtest_test_utils.Subprocess(
[sys.executable, '-c', 'import os; print \'UNSET_VAR\' not in os.environ'])
CAN_UNSET_ENV = eval(child.output)
# Checks if we should test with an empty filter. This doesn't
# make sense on platforms that cannot pass empty env variables (Win32)
# and on platforms that cannot unset variables (since we cannot tell
# the difference between "" and NULL -- Borland and Solaris < 5.10)
CAN_TEST_EMPTY_FILTER = (CAN_PASS_EMPTY_ENV and CAN_UNSET_ENV)
# The environment variable for specifying the test filters.
FILTER_ENV_VAR = 'GTEST_FILTER'
# The environment variables for test sharding.
TOTAL_SHARDS_ENV_VAR = 'GTEST_TOTAL_SHARDS'
SHARD_INDEX_ENV_VAR = 'GTEST_SHARD_INDEX'
SHARD_STATUS_FILE_ENV_VAR = 'GTEST_SHARD_STATUS_FILE'
# The command line flag for specifying the test filters.
FILTER_FLAG = 'gtest_filter'
# The command line flag for including disabled tests.
ALSO_RUN_DISABLED_TESTS_FLAG = 'gtest_also_run_disabled_tests'
# Command to run the gtest_filter_unittest_ program.
COMMAND = gtest_test_utils.GetTestExecutablePath('gtest_filter_unittest_')
# Regex for determining whether parameterized tests are enabled in the binary.
PARAM_TEST_REGEX = re.compile(r'/ParamTest')
# Regex for parsing test case names from Google Test's output.
TEST_CASE_REGEX = re.compile(r'^\[\-+\] \d+ tests? from (\w+(/\w+)?)')
# Regex for parsing test names from Google Test's output.
TEST_REGEX = re.compile(r'^\[\s*RUN\s*\].*\.(\w+(/\w+)?)')
# The command line flag to tell Google Test to output the list of tests it
# will run.
LIST_TESTS_FLAG = '--gtest_list_tests'
# Indicates whether Google Test supports death tests.
SUPPORTS_DEATH_TESTS = 'HasDeathTest' in gtest_test_utils.Subprocess(
[COMMAND, LIST_TESTS_FLAG]).output
# Full names of all tests in gtest_filter_unittests_.
PARAM_TESTS = [
'SeqP/ParamTest.TestX/0',
'SeqP/ParamTest.TestX/1',
'SeqP/ParamTest.TestY/0',
'SeqP/ParamTest.TestY/1',
'SeqQ/ParamTest.TestX/0',
'SeqQ/ParamTest.TestX/1',
'SeqQ/ParamTest.TestY/0',
'SeqQ/ParamTest.TestY/1',
]
DISABLED_TESTS = [
'BarTest.DISABLED_TestFour',
'BarTest.DISABLED_TestFive',
'BazTest.DISABLED_TestC',
'DISABLED_FoobarTest.Test1',
'DISABLED_FoobarTest.DISABLED_Test2',
'DISABLED_FoobarbazTest.TestA',
]
if SUPPORTS_DEATH_TESTS:
DEATH_TESTS = [
'HasDeathTest.Test1',
'HasDeathTest.Test2',
]
else:
DEATH_TESTS = []
# All the non-disabled tests.
ACTIVE_TESTS = [
'FooTest.Abc',
'FooTest.Xyz',
'BarTest.TestOne',
'BarTest.TestTwo',
'BarTest.TestThree',
'BazTest.TestOne',
'BazTest.TestA',
'BazTest.TestB',
] + DEATH_TESTS + PARAM_TESTS
param_tests_present = None
# Utilities.
environ = os.environ.copy()
def SetEnvVar(env_var, value):
"""Sets the env variable to 'value'; unsets it when 'value' is None."""
if value is not None:
environ[env_var] = value
elif env_var in environ:
del environ[env_var]
def RunAndReturnOutput(args = None):
"""Runs the test program and returns its output."""
return gtest_test_utils.Subprocess([COMMAND] + (args or []),
env=environ).output
def RunAndExtractTestList(args = None):
"""Runs the test program and returns its exit code and a list of tests run."""
p = gtest_test_utils.Subprocess([COMMAND] + (args or []), env=environ)
tests_run = []
test_case = ''
test = ''
for line in p.output.split('\n'):
match = TEST_CASE_REGEX.match(line)
if match is not None:
test_case = match.group(1)
else:
match = TEST_REGEX.match(line)
if match is not None:
test = match.group(1)
tests_run.append(test_case + '.' + test)
return (tests_run, p.exit_code)
def InvokeWithModifiedEnv(extra_env, function, *args, **kwargs):
"""Runs the given function and arguments in a modified environment."""
try:
original_env = environ.copy()
environ.update(extra_env)
return function(*args, **kwargs)
finally:
environ.clear()
environ.update(original_env)
def RunWithSharding(total_shards, shard_index, command):
"""Runs a test program shard and returns exit code and a list of tests run."""
extra_env = {SHARD_INDEX_ENV_VAR: str(shard_index),
TOTAL_SHARDS_ENV_VAR: str(total_shards)}
return InvokeWithModifiedEnv(extra_env, RunAndExtractTestList, command)
# The unit test.
class GTestFilterUnitTest(gtest_test_utils.TestCase):
"""Tests the env variable or the command line flag to filter tests."""
# Utilities.
def AssertSetEqual(self, lhs, rhs):
"""Asserts that two sets are equal."""
for elem in lhs:
self.assert_(elem in rhs, '%s in %s' % (elem, rhs))
for elem in rhs:
self.assert_(elem in lhs, '%s in %s' % (elem, lhs))
def AssertPartitionIsValid(self, set_var, list_of_sets):
"""Asserts that list_of_sets is a valid partition of set_var."""
full_partition = []
for slice_var in list_of_sets:
full_partition.extend(slice_var)
self.assertEqual(len(set_var), len(full_partition))
self.assertEqual(sets.Set(set_var), sets.Set(full_partition))
def AdjustForParameterizedTests(self, tests_to_run):
"""Adjust tests_to_run in case value parameterized tests are disabled."""
global param_tests_present
if not param_tests_present:
return list(sets.Set(tests_to_run) - sets.Set(PARAM_TESTS))
else:
return tests_to_run
def RunAndVerify(self, gtest_filter, tests_to_run):
"""Checks that the binary runs correct set of tests for a given filter."""
tests_to_run = self.AdjustForParameterizedTests(tests_to_run)
# First, tests using the environment variable.
# Windows removes empty variables from the environment when passing it
# to a new process. This means it is impossible to pass an empty filter
# into a process using the environment variable. However, we can still
# test the case when the variable is not supplied (i.e., gtest_filter is
# None).
# pylint: disable-msg=C6403
if CAN_TEST_EMPTY_FILTER or gtest_filter != '':
SetEnvVar(FILTER_ENV_VAR, gtest_filter)
tests_run = RunAndExtractTestList()[0]
SetEnvVar(FILTER_ENV_VAR, None)
self.AssertSetEqual(tests_run, tests_to_run)
# pylint: enable-msg=C6403
# Next, tests using the command line flag.
if gtest_filter is None:
args = []
else:
args = ['--%s=%s' % (FILTER_FLAG, gtest_filter)]
tests_run = RunAndExtractTestList(args)[0]
self.AssertSetEqual(tests_run, tests_to_run)
def RunAndVerifyWithSharding(self, gtest_filter, total_shards, tests_to_run,
args=None, check_exit_0=False):
"""Checks that binary runs correct tests for the given filter and shard.
Runs all shards of gtest_filter_unittest_ with the given filter, and
verifies that the right set of tests were run. The union of tests run
on each shard should be identical to tests_to_run, without duplicates.
Args:
gtest_filter: A filter to apply to the tests.
total_shards: A total number of shards to split test run into.
tests_to_run: A set of tests expected to run.
      args : Arguments to pass to the test binary.
check_exit_0: When set to a true value, make sure that all shards
return 0.
"""
tests_to_run = self.AdjustForParameterizedTests(tests_to_run)
# Windows removes empty variables from the environment when passing it
# to a new process. This means it is impossible to pass an empty filter
# into a process using the environment variable. However, we can still
# test the case when the variable is not supplied (i.e., gtest_filter is
# None).
# pylint: disable-msg=C6403
if CAN_TEST_EMPTY_FILTER or gtest_filter != '':
SetEnvVar(FILTER_ENV_VAR, gtest_filter)
partition = []
for i in range(0, total_shards):
(tests_run, exit_code) = RunWithSharding(total_shards, i, args)
if check_exit_0:
self.assertEqual(0, exit_code)
partition.append(tests_run)
self.AssertPartitionIsValid(tests_to_run, partition)
SetEnvVar(FILTER_ENV_VAR, None)
# pylint: enable-msg=C6403
def RunAndVerifyAllowingDisabled(self, gtest_filter, tests_to_run):
"""Checks that the binary runs correct set of tests for the given filter.
Runs gtest_filter_unittest_ with the given filter, and enables
disabled tests. Verifies that the right set of tests were run.
Args:
gtest_filter: A filter to apply to the tests.
tests_to_run: A set of tests expected to run.
"""
tests_to_run = self.AdjustForParameterizedTests(tests_to_run)
# Construct the command line.
    args = ['--%s' % ALSO_RUN_DISABLED_TESTS_FLAG]
if gtest_filter is not None:
args.append('--%s=%s' % (FILTER_FLAG, gtest_filter))
tests_run = RunAndExtractTestList(args)[0]
self.AssertSetEqual(tests_run, tests_to_run)
def setUp(self):
"""Sets up test case.
Determines whether value-parameterized tests are enabled in the binary and
sets the flags accordingly.
"""
global param_tests_present
if param_tests_present is None:
param_tests_present = PARAM_TEST_REGEX.search(
RunAndReturnOutput()) is not None
def testDefaultBehavior(self):
"""Tests the behavior of not specifying the filter."""
self.RunAndVerify(None, ACTIVE_TESTS)
def testDefaultBehaviorWithShards(self):
"""Tests the behavior without the filter, with sharding enabled."""
self.RunAndVerifyWithSharding(None, 1, ACTIVE_TESTS)
self.RunAndVerifyWithSharding(None, 2, ACTIVE_TESTS)
self.RunAndVerifyWithSharding(None, len(ACTIVE_TESTS) - 1, ACTIVE_TESTS)
self.RunAndVerifyWithSharding(None, len(ACTIVE_TESTS), ACTIVE_TESTS)
self.RunAndVerifyWithSharding(None, len(ACTIVE_TESTS) + 1, ACTIVE_TESTS)
def testEmptyFilter(self):
"""Tests an empty filter."""
self.RunAndVerify('', [])
self.RunAndVerifyWithSharding('', 1, [])
self.RunAndVerifyWithSharding('', 2, [])
def testBadFilter(self):
"""Tests a filter that matches nothing."""
self.RunAndVerify('BadFilter', [])
self.RunAndVerifyAllowingDisabled('BadFilter', [])
def testFullName(self):
"""Tests filtering by full name."""
self.RunAndVerify('FooTest.Xyz', ['FooTest.Xyz'])
self.RunAndVerifyAllowingDisabled('FooTest.Xyz', ['FooTest.Xyz'])
self.RunAndVerifyWithSharding('FooTest.Xyz', 5, ['FooTest.Xyz'])
def testUniversalFilters(self):
"""Tests filters that match everything."""
self.RunAndVerify('*', ACTIVE_TESTS)
self.RunAndVerify('*.*', ACTIVE_TESTS)
self.RunAndVerifyWithSharding('*.*', len(ACTIVE_TESTS) - 3, ACTIVE_TESTS)
self.RunAndVerifyAllowingDisabled('*', ACTIVE_TESTS + DISABLED_TESTS)
self.RunAndVerifyAllowingDisabled('*.*', ACTIVE_TESTS + DISABLED_TESTS)
def testFilterByTestCase(self):
"""Tests filtering by test case name."""
self.RunAndVerify('FooTest.*', ['FooTest.Abc', 'FooTest.Xyz'])
BAZ_TESTS = ['BazTest.TestOne', 'BazTest.TestA', 'BazTest.TestB']
self.RunAndVerify('BazTest.*', BAZ_TESTS)
self.RunAndVerifyAllowingDisabled('BazTest.*',
BAZ_TESTS + ['BazTest.DISABLED_TestC'])
def testFilterByTest(self):
"""Tests filtering by test name."""
self.RunAndVerify('*.TestOne', ['BarTest.TestOne', 'BazTest.TestOne'])
def testFilterDisabledTests(self):
"""Select only the disabled tests to run."""
self.RunAndVerify('DISABLED_FoobarTest.Test1', [])
self.RunAndVerifyAllowingDisabled('DISABLED_FoobarTest.Test1',
['DISABLED_FoobarTest.Test1'])
self.RunAndVerify('*DISABLED_*', [])
self.RunAndVerifyAllowingDisabled('*DISABLED_*', DISABLED_TESTS)
self.RunAndVerify('*.DISABLED_*', [])
self.RunAndVerifyAllowingDisabled('*.DISABLED_*', [
'BarTest.DISABLED_TestFour',
'BarTest.DISABLED_TestFive',
'BazTest.DISABLED_TestC',
'DISABLED_FoobarTest.DISABLED_Test2',
])
self.RunAndVerify('DISABLED_*', [])
self.RunAndVerifyAllowingDisabled('DISABLED_*', [
'DISABLED_FoobarTest.Test1',
'DISABLED_FoobarTest.DISABLED_Test2',
'DISABLED_FoobarbazTest.TestA',
])
def testWildcardInTestCaseName(self):
"""Tests using wildcard in the test case name."""
self.RunAndVerify('*a*.*', [
'BarTest.TestOne',
'BarTest.TestTwo',
'BarTest.TestThree',
'BazTest.TestOne',
'BazTest.TestA',
'BazTest.TestB', ] + DEATH_TESTS + PARAM_TESTS)
def testWildcardInTestName(self):
"""Tests using wildcard in the test name."""
self.RunAndVerify('*.*A*', ['FooTest.Abc', 'BazTest.TestA'])
def testFilterWithoutDot(self):
"""Tests a filter that has no '.' in it."""
self.RunAndVerify('*z*', [
'FooTest.Xyz',
'BazTest.TestOne',
'BazTest.TestA',
'BazTest.TestB',
])
def testTwoPatterns(self):
"""Tests filters that consist of two patterns."""
self.RunAndVerify('Foo*.*:*A*', [
'FooTest.Abc',
'FooTest.Xyz',
'BazTest.TestA',
])
# An empty pattern + a non-empty one
self.RunAndVerify(':*A*', ['FooTest.Abc', 'BazTest.TestA'])
def testThreePatterns(self):
"""Tests filters that consist of three patterns."""
self.RunAndVerify('*oo*:*A*:*One', [
'FooTest.Abc',
'FooTest.Xyz',
'BarTest.TestOne',
'BazTest.TestOne',
'BazTest.TestA',
])
# The 2nd pattern is empty.
self.RunAndVerify('*oo*::*One', [
'FooTest.Abc',
'FooTest.Xyz',
'BarTest.TestOne',
'BazTest.TestOne',
])
# The last 2 patterns are empty.
self.RunAndVerify('*oo*::', [
'FooTest.Abc',
'FooTest.Xyz',
])
def testNegativeFilters(self):
self.RunAndVerify('*-BazTest.TestOne', [
'FooTest.Abc',
'FooTest.Xyz',
'BarTest.TestOne',
'BarTest.TestTwo',
'BarTest.TestThree',
'BazTest.TestA',
'BazTest.TestB',
] + DEATH_TESTS + PARAM_TESTS)
self.RunAndVerify('*-FooTest.Abc:BazTest.*', [
'FooTest.Xyz',
'BarTest.TestOne',
'BarTest.TestTwo',
'BarTest.TestThree',
] + DEATH_TESTS + PARAM_TESTS)
self.RunAndVerify('BarTest.*-BarTest.TestOne', [
'BarTest.TestTwo',
'BarTest.TestThree',
])
# Tests without leading '*'.
self.RunAndVerify('-FooTest.Abc:FooTest.Xyz:BazTest.*', [
'BarTest.TestOne',
'BarTest.TestTwo',
'BarTest.TestThree',
] + DEATH_TESTS + PARAM_TESTS)
# Value parameterized tests.
self.RunAndVerify('*/*', PARAM_TESTS)
# Value parameterized tests filtering by the sequence name.
self.RunAndVerify('SeqP/*', [
'SeqP/ParamTest.TestX/0',
'SeqP/ParamTest.TestX/1',
'SeqP/ParamTest.TestY/0',
'SeqP/ParamTest.TestY/1',
])
# Value parameterized tests filtering by the test name.
self.RunAndVerify('*/0', [
'SeqP/ParamTest.TestX/0',
'SeqP/ParamTest.TestY/0',
'SeqQ/ParamTest.TestX/0',
'SeqQ/ParamTest.TestY/0',
])
def testFlagOverridesEnvVar(self):
"""Tests that the filter flag overrides the filtering env. variable."""
SetEnvVar(FILTER_ENV_VAR, 'Foo*')
args = ['--%s=%s' % (FILTER_FLAG, '*One')]
tests_run = RunAndExtractTestList(args)[0]
SetEnvVar(FILTER_ENV_VAR, None)
self.AssertSetEqual(tests_run, ['BarTest.TestOne', 'BazTest.TestOne'])
def testShardStatusFileIsCreated(self):
"""Tests that the shard file is created if specified in the environment."""
shard_status_file = os.path.join(gtest_test_utils.GetTempDir(),
'shard_status_file')
self.assert_(not os.path.exists(shard_status_file))
extra_env = {SHARD_STATUS_FILE_ENV_VAR: shard_status_file}
try:
InvokeWithModifiedEnv(extra_env, RunAndReturnOutput)
finally:
self.assert_(os.path.exists(shard_status_file))
os.remove(shard_status_file)
def testShardStatusFileIsCreatedWithListTests(self):
"""Tests that the shard file is created with the "list_tests" flag."""
shard_status_file = os.path.join(gtest_test_utils.GetTempDir(),
'shard_status_file2')
self.assert_(not os.path.exists(shard_status_file))
extra_env = {SHARD_STATUS_FILE_ENV_VAR: shard_status_file}
try:
output = InvokeWithModifiedEnv(extra_env,
RunAndReturnOutput,
[LIST_TESTS_FLAG])
finally:
# This assertion ensures that Google Test enumerated the tests as
# opposed to running them.
self.assert_('[==========]' not in output,
'Unexpected output during test enumeration.\n'
'Please ensure that LIST_TESTS_FLAG is assigned the\n'
'correct flag value for listing Google Test tests.')
self.assert_(os.path.exists(shard_status_file))
os.remove(shard_status_file)
if SUPPORTS_DEATH_TESTS:
def testShardingWorksWithDeathTests(self):
"""Tests integration with death tests and sharding."""
gtest_filter = 'HasDeathTest.*:SeqP/*'
expected_tests = [
'HasDeathTest.Test1',
'HasDeathTest.Test2',
'SeqP/ParamTest.TestX/0',
'SeqP/ParamTest.TestX/1',
'SeqP/ParamTest.TestY/0',
'SeqP/ParamTest.TestY/1',
]
for flag in ['--gtest_death_test_style=threadsafe',
'--gtest_death_test_style=fast']:
self.RunAndVerifyWithSharding(gtest_filter, 3, expected_tests,
check_exit_0=True, args=[flag])
self.RunAndVerifyWithSharding(gtest_filter, 5, expected_tests,
check_exit_0=True, args=[flag])
if __name__ == '__main__':
gtest_test_utils.Main()
| bsd-3-clause |
40223243/2015cd_midterm2 | static/Brython3.1.1-20150328-091302/Lib/warnings.py | 752 | 13825 | """Python part of the warnings subsystem."""
# Note: function level imports should *not* be used
# in this module as it may cause import lock deadlock.
# See bug 683658.
import linecache
import sys
__all__ = ["warn", "showwarning", "formatwarning", "filterwarnings",
"resetwarnings", "catch_warnings"]
def showwarning(message, category, filename, lineno, file=None, line=None):
"""Hook to write a warning to a file; replace if you like."""
if file is None:
file = sys.stderr
try:
file.write(formatwarning(message, category, filename, lineno, line))
except IOError:
pass # the file (probably stderr) is invalid - this warning gets lost.
def formatwarning(message, category, filename, lineno, line=None):
"""Function to format a warning the standard way."""
s = "%s:%s: %s: %s\n" % (filename, lineno, category.__name__, message)
line = linecache.getline(filename, lineno) if line is None else line
if line:
line = line.strip()
s += " %s\n" % line
return s
def filterwarnings(action, message="", category=Warning, module="", lineno=0,
append=False):
"""Insert an entry into the list of warnings filters (at the front).
'action' -- one of "error", "ignore", "always", "default", "module",
or "once"
'message' -- a regex that the warning message must match
'category' -- a class that the warning must be a subclass of
'module' -- a regex that the module name must match
'lineno' -- an integer line number, 0 matches all warnings
'append' -- if true, append to the list of filters
"""
import re
assert action in ("error", "ignore", "always", "default", "module",
"once"), "invalid action: %r" % (action,)
assert isinstance(message, str), "message must be a string"
assert isinstance(category, type), "category must be a class"
assert issubclass(category, Warning), "category must be a Warning subclass"
assert isinstance(module, str), "module must be a string"
assert isinstance(lineno, int) and lineno >= 0, \
"lineno must be an int >= 0"
item = (action, re.compile(message, re.I), category,
re.compile(module), lineno)
if append:
filters.append(item)
else:
filters.insert(0, item)
def simplefilter(action, category=Warning, lineno=0, append=False):
"""Insert a simple entry into the list of warnings filters (at the front).
A simple filter matches all modules and messages.
'action' -- one of "error", "ignore", "always", "default", "module",
or "once"
'category' -- a class that the warning must be a subclass of
'lineno' -- an integer line number, 0 matches all warnings
'append' -- if true, append to the list of filters
"""
assert action in ("error", "ignore", "always", "default", "module",
"once"), "invalid action: %r" % (action,)
assert isinstance(lineno, int) and lineno >= 0, \
"lineno must be an int >= 0"
item = (action, None, category, None, lineno)
if append:
filters.append(item)
else:
filters.insert(0, item)
def resetwarnings():
"""Clear the list of warning filters, so that no filters are active."""
filters[:] = []
class _OptionError(Exception):
"""Exception used by option processing helpers."""
pass
# Helper to process -W options passed via sys.warnoptions
def _processoptions(args):
for arg in args:
try:
_setoption(arg)
except _OptionError as msg:
print("Invalid -W option ignored:", msg, file=sys.stderr)
# Helper for _processoptions()
def _setoption(arg):
import re
parts = arg.split(':')
if len(parts) > 5:
raise _OptionError("too many fields (max 5): %r" % (arg,))
while len(parts) < 5:
parts.append('')
action, message, category, module, lineno = [s.strip()
for s in parts]
action = _getaction(action)
message = re.escape(message)
category = _getcategory(category)
module = re.escape(module)
if module:
module = module + '$'
if lineno:
try:
lineno = int(lineno)
if lineno < 0:
raise ValueError
except (ValueError, OverflowError):
raise _OptionError("invalid lineno %r" % (lineno,))
else:
lineno = 0
filterwarnings(action, message, category, module, lineno)
# Helper for _setoption()
def _getaction(action):
if not action:
return "default"
if action == "all": return "always" # Alias
for a in ('default', 'always', 'ignore', 'module', 'once', 'error'):
if a.startswith(action):
return a
raise _OptionError("invalid action: %r" % (action,))
# Helper for _setoption()
def _getcategory(category):
import re
if not category:
return Warning
if re.match("^[a-zA-Z0-9_]+$", category):
try:
cat = eval(category)
except NameError:
raise _OptionError("unknown warning category: %r" % (category,))
else:
i = category.rfind(".")
module = category[:i]
klass = category[i+1:]
try:
m = __import__(module, None, None, [klass])
except ImportError:
raise _OptionError("invalid module name: %r" % (module,))
try:
cat = getattr(m, klass)
except AttributeError:
raise _OptionError("unknown warning category: %r" % (category,))
if not issubclass(cat, Warning):
raise _OptionError("invalid warning category: %r" % (category,))
return cat
# Code typically replaced by _warnings
def warn(message, category=None, stacklevel=1):
"""Issue a warning, or maybe ignore it or raise an exception."""
# Check if message is already a Warning object
if isinstance(message, Warning):
category = message.__class__
# Check category argument
if category is None:
category = UserWarning
assert issubclass(category, Warning)
# Get context information
try:
caller = sys._getframe(stacklevel)
except ValueError:
globals = sys.__dict__
lineno = 1
else:
globals = caller.f_globals
lineno = caller.f_lineno
if '__name__' in globals:
module = globals['__name__']
else:
module = "<string>"
filename = globals.get('__file__')
if filename:
fnl = filename.lower()
if fnl.endswith((".pyc", ".pyo")):
filename = filename[:-1]
else:
if module == "__main__":
try:
filename = sys.argv[0]
except AttributeError:
# embedded interpreters don't have sys.argv, see bug #839151
filename = '__main__'
if not filename:
filename = module
registry = globals.setdefault("__warningregistry__", {})
warn_explicit(message, category, filename, lineno, module, registry,
globals)
def warn_explicit(message, category, filename, lineno,
module=None, registry=None, module_globals=None):
lineno = int(lineno)
if module is None:
module = filename or "<unknown>"
if module[-3:].lower() == ".py":
module = module[:-3] # XXX What about leading pathname?
if registry is None:
registry = {}
if isinstance(message, Warning):
text = str(message)
category = message.__class__
else:
text = message
message = category(message)
key = (text, category, lineno)
# Quick test for common case
if registry.get(key):
return
# Search the filters
for item in filters:
action, msg, cat, mod, ln = item
if ((msg is None or msg.match(text)) and
issubclass(category, cat) and
(mod is None or mod.match(module)) and
(ln == 0 or lineno == ln)):
break
else:
action = defaultaction
# Early exit actions
if action == "ignore":
registry[key] = 1
return
# Prime the linecache for formatting, in case the
# "file" is actually in a zipfile or something.
linecache.getlines(filename, module_globals)
if action == "error":
raise message
# Other actions
if action == "once":
registry[key] = 1
oncekey = (text, category)
if onceregistry.get(oncekey):
return
onceregistry[oncekey] = 1
elif action == "always":
pass
elif action == "module":
registry[key] = 1
altkey = (text, category, 0)
if registry.get(altkey):
return
registry[altkey] = 1
elif action == "default":
registry[key] = 1
else:
# Unrecognized actions are errors
raise RuntimeError(
"Unrecognized action (%r) in warnings.filters:\n %s" %
(action, item))
if not callable(showwarning):
raise TypeError("warnings.showwarning() must be set to a "
"function or method")
# Print message and context
showwarning(message, category, filename, lineno)
class WarningMessage(object):
"""Holds the result of a single showwarning() call."""
_WARNING_DETAILS = ("message", "category", "filename", "lineno", "file",
"line")
def __init__(self, message, category, filename, lineno, file=None,
line=None):
local_values = locals()
for attr in self._WARNING_DETAILS:
setattr(self, attr, local_values[attr])
self._category_name = category.__name__ if category else None
def __str__(self):
return ("{message : %r, category : %r, filename : %r, lineno : %s, "
"line : %r}" % (self.message, self._category_name,
self.filename, self.lineno, self.line))
class catch_warnings(object):
"""A context manager that copies and restores the warnings filter upon
exiting the context.
The 'record' argument specifies whether warnings should be captured by a
custom implementation of warnings.showwarning() and be appended to a list
returned by the context manager. Otherwise None is returned by the context
manager. The objects appended to the list are arguments whose attributes
mirror the arguments to showwarning().
The 'module' argument is to specify an alternative module to the module
named 'warnings' and imported under that name. This argument is only useful
when testing the warnings module itself.
"""
def __init__(self, *, record=False, module=None):
"""Specify whether to record warnings and if an alternative module
should be used other than sys.modules['warnings'].
For compatibility with Python 3.0, please consider all arguments to be
keyword-only.
"""
self._record = record
self._module = sys.modules['warnings'] if module is None else module
self._entered = False
def __repr__(self):
args = []
if self._record:
args.append("record=True")
if self._module is not sys.modules['warnings']:
args.append("module=%r" % self._module)
name = type(self).__name__
return "%s(%s)" % (name, ", ".join(args))
def __enter__(self):
if self._entered:
raise RuntimeError("Cannot enter %r twice" % self)
self._entered = True
self._filters = self._module.filters
self._module.filters = self._filters[:]
self._showwarning = self._module.showwarning
if self._record:
log = []
def showwarning(*args, **kwargs):
log.append(WarningMessage(*args, **kwargs))
self._module.showwarning = showwarning
return log
else:
return None
def __exit__(self, *exc_info):
if not self._entered:
raise RuntimeError("Cannot exit %r without entering first" % self)
self._module.filters = self._filters
self._module.showwarning = self._showwarning
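# Illustrative use of the context manager above (a sketch, not exercised here):
#     with catch_warnings(record=True) as log:
#         simplefilter("always")
#         warn("deprecated", DeprecationWarning)
#     assert log[0].category is DeprecationWarning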
# filters contains a sequence of filter 5-tuples
# The components of the 5-tuple are:
# - an action: error, ignore, always, default, module, or once
# - a compiled regex that must match the warning message
# - a class representing the warning category
# - a compiled regex that must match the module that is being warned
# - a line number for the line being warning, or 0 to mean any line
# If either of the compiled regexes is None, it matches anything.
_warnings_defaults = False
try:
from _warnings import (filters, _defaultaction, _onceregistry,
warn, warn_explicit)
defaultaction = _defaultaction
onceregistry = _onceregistry
_warnings_defaults = True
except ImportError:
filters = []
defaultaction = "default"
onceregistry = {}
# Module initialization
_processoptions(sys.warnoptions)
if not _warnings_defaults:
silence = [ImportWarning, PendingDeprecationWarning]
silence.append(DeprecationWarning)
for cls in silence:
simplefilter("ignore", category=cls)
bytes_warning = sys.flags.bytes_warning
if bytes_warning > 1:
bytes_action = "error"
elif bytes_warning:
bytes_action = "default"
else:
bytes_action = "ignore"
simplefilter(bytes_action, category=BytesWarning, append=1)
# resource usage warnings are enabled by default in pydebug mode
if hasattr(sys, 'gettotalrefcount'):
resource_action = "always"
else:
resource_action = "ignore"
simplefilter(resource_action, category=ResourceWarning, append=1)
del _warnings_defaults
| agpl-3.0 |
avadacatavra/servo | tests/wpt/web-platform-tests/tools/lint/tests/test_file_lints.py | 3 | 21473 | from __future__ import unicode_literals
from ..lint import check_file_contents
from .base import check_errors
import os
import pytest
import six
INTERESTING_FILE_NAMES = {
"python": [
"test.py",
],
"js": [
"test.js",
],
"web-lax": [
"test.htm",
"test.html",
],
"web-strict": [
"test.svg",
"test.xht",
"test.xhtml",
],
}
def check_with_files(input_bytes):
return {
filename: (check_file_contents("", filename, six.BytesIO(input_bytes)), kind)
for (filename, kind) in
(
(os.path.join("html", filename), kind)
for (kind, filenames) in INTERESTING_FILE_NAMES.items()
for filename in filenames
)
}
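# check_with_files lints one byte payload under every interesting filename, so
# each test below can assert across the python, js, web-lax and web-strict
# handling paths in a single pass.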
def test_trailing_whitespace():
error_map = check_with_files(b"test; ")
for (filename, (errors, kind)) in error_map.items():
check_errors(errors)
expected = [("TRAILING WHITESPACE", "Whitespace at EOL", filename, 1)]
if kind == "web-strict":
expected.append(("PARSE-FAILED", "Unable to parse file", filename, None))
assert errors == expected
def test_indent_tabs():
error_map = check_with_files(b"def foo():\n\x09pass")
for (filename, (errors, kind)) in error_map.items():
check_errors(errors)
expected = [("INDENT TABS", "Tabs used for indentation", filename, 2)]
if kind == "web-strict":
expected.append(("PARSE-FAILED", "Unable to parse file", filename, None))
assert errors == expected
def test_cr_not_at_eol():
error_map = check_with_files(b"line1\rline2\r")
for (filename, (errors, kind)) in error_map.items():
check_errors(errors)
expected = [("CR AT EOL", "CR character in line separator", filename, 1)]
if kind == "web-strict":
expected.append(("PARSE-FAILED", "Unable to parse file", filename, None))
assert errors == expected
def test_cr_at_eol():
error_map = check_with_files(b"line1\r\nline2\r\n")
for (filename, (errors, kind)) in error_map.items():
check_errors(errors)
expected = [
("CR AT EOL", "CR character in line separator", filename, 1),
("CR AT EOL", "CR character in line separator", filename, 2),
]
if kind == "web-strict":
expected.append(("PARSE-FAILED", "Unable to parse file", filename, None))
assert errors == expected
def test_w3c_test_org():
error_map = check_with_files(b"import('http://www.w3c-test.org/')")
for (filename, (errors, kind)) in error_map.items():
check_errors(errors)
expected = [("W3C-TEST.ORG", "External w3c-test.org domain used", filename, 1)]
if kind == "python":
expected.append(("PARSE-FAILED", "Unable to parse file", filename, 1))
elif kind == "web-strict":
expected.append(("PARSE-FAILED", "Unable to parse file", filename, None))
assert errors == expected
def test_web_platform_test():
error_map = check_with_files(b"import('http://web-platform.test/')")
for (filename, (errors, kind)) in error_map.items():
check_errors(errors)
expected = [("WEB-PLATFORM.TEST", "Internal web-platform.test domain used", filename, 1)]
if kind == "python":
expected.append(("PARSE-FAILED", "Unable to parse file", filename, 1))
elif kind == "web-strict":
expected.append(("PARSE-FAILED", "Unable to parse file", filename, None))
assert errors == expected
def test_webidl2_js():
error_map = check_with_files(b"<script src=/resources/webidl2.js>")
for (filename, (errors, kind)) in error_map.items():
check_errors(errors)
expected = [("WEBIDL2.JS", "Legacy webidl2.js script used", filename, 1)]
if kind == "python":
expected.append(("PARSE-FAILED", "Unable to parse file", filename, 1))
elif kind == "web-strict":
expected.append(("PARSE-FAILED", "Unable to parse file", filename, None))
assert errors == expected
def test_console():
error_map = check_with_files(b"<script>\nconsole.log('error');\nconsole.error ('log')\n</script>")
for (filename, (errors, kind)) in error_map.items():
check_errors(errors)
if kind in ["web-lax", "web-strict", "js"]:
assert errors == [
("CONSOLE", "Console logging API used", filename, 2),
("CONSOLE", "Console logging API used", filename, 3),
]
else:
assert errors == [("PARSE-FAILED", "Unable to parse file", filename, 1)]
def test_setTimeout():
error_map = check_with_files(b"<script>setTimeout(() => 1, 10)</script>")
for (filename, (errors, kind)) in error_map.items():
check_errors(errors)
if kind == "python":
assert errors == [("PARSE-FAILED", "Unable to parse file", filename, 1)]
else:
assert errors == [('SET TIMEOUT',
'setTimeout used; step_timeout should typically be used instead',
filename,
1)]
def test_eventSender():
error_map = check_with_files(b"<script>eventSender.mouseDown()</script>")
for (filename, (errors, kind)) in error_map.items():
check_errors(errors)
if kind == "python":
assert errors == [("PARSE-FAILED", "Unable to parse file", filename, 1)]
else:
assert errors == [('LAYOUTTESTS APIS',
'eventSender/testRunner/window.internals used; these are LayoutTests-specific APIs (WebKit/Blink)',
filename,
1)]
def test_testRunner():
error_map = check_with_files(b"<script>if (window.testRunner) { testRunner.waitUntilDone(); }</script>")
for (filename, (errors, kind)) in error_map.items():
check_errors(errors)
if kind == "python":
assert errors == [("PARSE-FAILED", "Unable to parse file", filename, 1)]
else:
assert errors == [('LAYOUTTESTS APIS',
'eventSender/testRunner/window.internals used; these are LayoutTests-specific APIs (WebKit/Blink)',
filename,
1)]
def test_windowDotInternals():
error_map = check_with_files(b"<script>if (window.internals) { internals.doAThing(); }</script>")
for (filename, (errors, kind)) in error_map.items():
check_errors(errors)
if kind == "python":
assert errors == [("PARSE-FAILED", "Unable to parse file", filename, 1)]
else:
assert errors == [('LAYOUTTESTS APIS',
'eventSender/testRunner/window.internals used; these are LayoutTests-specific APIs (WebKit/Blink)',
filename,
1)]
def test_meta_timeout():
code = b"""
<html xmlns="http://www.w3.org/1999/xhtml">
<meta name="timeout" />
<meta name="timeout" content="short" />
<meta name="timeout" content="long" />
</html>
"""
error_map = check_with_files(code)
for (filename, (errors, kind)) in error_map.items():
check_errors(errors)
if kind in ["web-lax", "web-strict"]:
assert errors == [
("MULTIPLE-TIMEOUT", "More than one meta name='timeout'", filename, None),
("INVALID-TIMEOUT", "Invalid timeout value ", filename, None),
("INVALID-TIMEOUT", "Invalid timeout value short", filename, None),
]
elif kind == "python":
assert errors == [
("PARSE-FAILED", "Unable to parse file", filename, 2),
]
def test_early_testharnessreport():
code = b"""
<html xmlns="http://www.w3.org/1999/xhtml">
<script src="/resources/testharnessreport.js"></script>
<script src="/resources/testharness.js"></script>
</html>
"""
error_map = check_with_files(code)
for (filename, (errors, kind)) in error_map.items():
check_errors(errors)
if kind in ["web-lax", "web-strict"]:
assert errors == [
("EARLY-TESTHARNESSREPORT", "testharnessreport.js script seen before testharness.js script", filename, None),
]
elif kind == "python":
assert errors == [
("PARSE-FAILED", "Unable to parse file", filename, 2),
]
def test_multiple_testharness():
code = b"""
<html xmlns="http://www.w3.org/1999/xhtml">
<script src="/resources/testharness.js"></script>
<script src="/resources/testharness.js"></script>
</html>
"""
error_map = check_with_files(code)
for (filename, (errors, kind)) in error_map.items():
check_errors(errors)
if kind in ["web-lax", "web-strict"]:
assert errors == [
("MULTIPLE-TESTHARNESS", "More than one <script src='/resources/testharness.js'>", filename, None),
("MISSING-TESTHARNESSREPORT", "Missing <script src='/resources/testharnessreport.js'>", filename, None),
]
elif kind == "python":
assert errors == [
("PARSE-FAILED", "Unable to parse file", filename, 2),
]
def test_multiple_testharnessreport():
code = b"""
<html xmlns="http://www.w3.org/1999/xhtml">
<script src="/resources/testharness.js"></script>
<script src="/resources/testharnessreport.js"></script>
<script src="/resources/testharnessreport.js"></script>
</html>
"""
error_map = check_with_files(code)
for (filename, (errors, kind)) in error_map.items():
check_errors(errors)
if kind in ["web-lax", "web-strict"]:
assert errors == [
("MULTIPLE-TESTHARNESSREPORT", "More than one <script src='/resources/testharnessreport.js'>", filename, None),
]
elif kind == "python":
assert errors == [
("PARSE-FAILED", "Unable to parse file", filename, 2),
]
def test_present_testharnesscss():
code = b"""
<html xmlns="http://www.w3.org/1999/xhtml">
<script src="/resources/testharness.js"></script>
<script src="/resources/testharnessreport.js"></script>
<link rel="stylesheet" href="/resources/testharness.css"/>
</html>
"""
error_map = check_with_files(code)
for (filename, (errors, kind)) in error_map.items():
check_errors(errors)
if kind in ["web-lax", "web-strict"]:
assert errors == [
("PRESENT-TESTHARNESSCSS", "Explicit link to testharness.css present", filename, None),
]
elif kind == "python":
assert errors == [
("PARSE-FAILED", "Unable to parse file", filename, 2),
]
def test_testharness_path():
code = b"""\
<html xmlns="http://www.w3.org/1999/xhtml">
<script src="testharness.js"></script>
<script src="resources/testharness.js"></script>
<script src="../resources/testharness.js"></script>
<script src="http://w3c-test.org/resources/testharness.js"></script>
</html>
"""
error_map = check_with_files(code)
for (filename, (errors, kind)) in error_map.items():
check_errors(errors)
expected = [("W3C-TEST.ORG", "External w3c-test.org domain used", filename, 5)]
if kind == "python":
expected.append(("PARSE-FAILED", "Unable to parse file", filename, 1))
elif kind in ["web-lax", "web-strict"]:
expected.extend([
("TESTHARNESS-PATH", "testharness.js script seen with incorrect path", filename, None),
("TESTHARNESS-PATH", "testharness.js script seen with incorrect path", filename, None),
("TESTHARNESS-PATH", "testharness.js script seen with incorrect path", filename, None),
("TESTHARNESS-PATH", "testharness.js script seen with incorrect path", filename, None),
])
assert errors == expected
def test_testharnessreport_path():
code = b"""\
<html xmlns="http://www.w3.org/1999/xhtml">
<script src="testharnessreport.js"></script>
<script src="resources/testharnessreport.js"></script>
<script src="../resources/testharnessreport.js"></script>
<script src="http://w3c-test.org/resources/testharnessreport.js"></script>
</html>
"""
error_map = check_with_files(code)
for (filename, (errors, kind)) in error_map.items():
check_errors(errors)
expected = [("W3C-TEST.ORG", "External w3c-test.org domain used", filename, 5)]
if kind == "python":
expected.append(("PARSE-FAILED", "Unable to parse file", filename, 1))
elif kind in ["web-lax", "web-strict"]:
expected.extend([
("TESTHARNESSREPORT-PATH", "testharnessreport.js script seen with incorrect path", filename, None),
("TESTHARNESSREPORT-PATH", "testharnessreport.js script seen with incorrect path", filename, None),
("TESTHARNESSREPORT-PATH", "testharnessreport.js script seen with incorrect path", filename, None),
("TESTHARNESSREPORT-PATH", "testharnessreport.js script seen with incorrect path", filename, None),
])
assert errors == expected
def test_not_testharness_path():
code = b"""\
<html xmlns="http://www.w3.org/1999/xhtml">
<script src="/resources/testharness.js"></script>
<script src="/resources/testharnessreport.js"></script>
<script src="resources/webperftestharness.js"></script>
</html>
"""
error_map = check_with_files(code)
for (filename, (errors, kind)) in error_map.items():
check_errors(errors)
if kind == "python":
assert errors == [
("PARSE-FAILED", "Unable to parse file", filename, 1),
]
else:
assert errors == []
@pytest.mark.skipif(six.PY3, reason="Cannot parse print statements from python 3")
def test_print_statement():
error_map = check_with_files(b"def foo():\n print 'statement'\n print\n")
for (filename, (errors, kind)) in error_map.items():
check_errors(errors)
if kind == "python":
assert errors == [
("PRINT STATEMENT", "Print function used", filename, 2),
("PRINT STATEMENT", "Print function used", filename, 3),
]
elif kind == "web-strict":
assert errors == [
("PARSE-FAILED", "Unable to parse file", filename, None),
]
else:
assert errors == []
def test_print_function():
error_map = check_with_files(b"def foo():\n print('function')\n")
for (filename, (errors, kind)) in error_map.items():
check_errors(errors)
if kind == "python":
assert errors == [
("PRINT STATEMENT", "Print function used", filename, 2),
]
elif kind == "web-strict":
assert errors == [
("PARSE-FAILED", "Unable to parse file", filename, None),
]
else:
assert errors == []
open_mode_code = """
def first():
return {0}("test.png")
def second():
return {0}("test.png", "r")
def third():
return {0}("test.png", "rb")
def fourth():
return {0}("test.png", encoding="utf-8")
def fifth():
return {0}("test.png", mode="rb")
"""
def test_open_mode():
for method in ["open", "file"]:
code = open_mode_code.format(method).encode("utf-8")
errors = check_file_contents("", "test.py", six.BytesIO(code))
check_errors(errors)
message = ("File opened without providing an explicit mode (note: " +
"binary files must be read with 'b' in the mode flags)")
assert errors == [
("OPEN-NO-MODE", message, "test.py", 3),
("OPEN-NO-MODE", message, "test.py", 12),
]
@pytest.mark.parametrize(
"filename,expect_error",
[
("foo/bar.html", False),
("css/bar.html", True),
])
def test_css_support_file(filename, expect_error):
errors = check_file_contents("", filename, six.BytesIO(b""))
check_errors(errors)
if expect_error:
assert errors == [
('SUPPORT-WRONG-DIR',
'Support file not in support directory',
filename,
None),
]
else:
assert errors == []
def test_css_missing_file_in_css():
code = b"""\
<html xmlns="http://www.w3.org/1999/xhtml">
<script src="/resources/testharness.js"></script>
<script src="/resources/testharnessreport.js"></script>
</html>
"""
errors = check_file_contents("", "css/foo/bar.html", six.BytesIO(code))
check_errors(errors)
assert errors == [
('MISSING-LINK',
'Testcase file must have a link to a spec',
"css/foo/bar.html",
None),
]
def test_css_missing_file_manual():
errors = check_file_contents("", "css/foo/bar-manual.html", six.BytesIO(b""))
check_errors(errors)
assert errors == [
('MISSING-LINK',
'Testcase file must have a link to a spec',
"css/foo/bar-manual.html",
None),
]
@pytest.mark.parametrize("filename", [
"foo.worker.js",
"foo.any.js",
])
@pytest.mark.parametrize("input,error", [
(b"""//META: timeout=long\n""", None),
(b"""// META: timeout=long\n""", None),
(b"""// META: timeout=long\n""", None),
(b"""// META: script=foo.js\n""", None),
(b"""# META:\n""", None),
(b"""\n// META: timeout=long\n""", (2, "STRAY-METADATA")),
(b""" // META: timeout=long\n""", (1, "INDENTED-METADATA")),
(b"""// META: timeout=long\n// META: timeout=long\n""", None),
(b"""// META: timeout=long\n\n// META: timeout=long\n""", (3, "STRAY-METADATA")),
(b"""// META: timeout=long\n// Start of the test\n// META: timeout=long\n""", (3, "STRAY-METADATA")),
(b"""// META:\n""", (1, "BROKEN-METADATA")),
(b"""// META: foobar\n""", (1, "BROKEN-METADATA")),
(b"""// META: foo=bar\n""", (1, "UNKNOWN-METADATA")),
(b"""// META: timeout=bar\n""", (1, "UNKNOWN-TIMEOUT-METADATA")),
])
def test_script_metadata(filename, input, error):
errors = check_file_contents("", filename, six.BytesIO(input))
check_errors(errors)
if error is not None:
line, kind = error
messages = {
"STRAY-METADATA": "Metadata comments should start the file",
"INDENTED-METADATA": "Metadata comments should start the line",
"BROKEN-METADATA": "Metadata comment is not formatted correctly",
"UNKNOWN-TIMEOUT-METADATA": "Unexpected value for timeout metadata",
"UNKNOWN-METADATA": "Unexpected kind of metadata",
}
assert errors == [
(kind,
messages[kind],
filename,
line),
]
else:
assert errors == []
@pytest.mark.parametrize("globals,error", [
(b"", None),
(b"default", None),
(b"!default", None),
(b"window", None),
(b"!window", None),
(b"!dedicatedworker", None),
(b"window, !window", "BROKEN-GLOBAL-METADATA"),
(b"!serviceworker", "BROKEN-GLOBAL-METADATA"),
(b"serviceworker, !serviceworker", "BROKEN-GLOBAL-METADATA"),
(b"worker, !dedicatedworker", None),
(b"worker, !serviceworker", None),
(b"!worker", None),
(b"foo", "UNKNOWN-GLOBAL-METADATA"),
(b"!foo", "UNKNOWN-GLOBAL-METADATA"),
])
def test_script_globals_metadata(globals, error):
filename = "foo.any.js"
input = b"""// META: global=%s\n""" % globals
errors = check_file_contents("", filename, six.BytesIO(input))
check_errors(errors)
if error is not None:
errors = [(k, f, l) for (k, _, f, l) in errors]
assert errors == [
(error,
filename,
1),
]
else:
assert errors == []
@pytest.mark.parametrize("input,error", [
(b"""#META: timeout=long\n""", None),
(b"""# META: timeout=long\n""", None),
(b"""# META: timeout=long\n""", None),
(b""""// META:"\n""", None),
(b"""\n# META: timeout=long\n""", (2, "STRAY-METADATA")),
(b""" # META: timeout=long\n""", (1, "INDENTED-METADATA")),
(b"""# META: timeout=long\n# META: timeout=long\n""", None),
(b"""# META: timeout=long\n\n# META: timeout=long\n""", (3, "STRAY-METADATA")),
(b"""# META: timeout=long\n# Start of the test\n# META: timeout=long\n""", (3, "STRAY-METADATA")),
(b"""# META:\n""", (1, "BROKEN-METADATA")),
(b"""# META: foobar\n""", (1, "BROKEN-METADATA")),
(b"""# META: foo=bar\n""", (1, "UNKNOWN-METADATA")),
(b"""# META: timeout=bar\n""", (1, "UNKNOWN-TIMEOUT-METADATA")),
])
def test_python_metadata(input, error):
filename = "test.py"
errors = check_file_contents("", filename, six.BytesIO(input))
check_errors(errors)
if error is not None:
line, kind = error
messages = {
"STRAY-METADATA": "Metadata comments should start the file",
"INDENTED-METADATA": "Metadata comments should start the line",
"BROKEN-METADATA": "Metadata comment is not formatted correctly",
"UNKNOWN-TIMEOUT-METADATA": "Unexpected value for timeout metadata",
"UNKNOWN-METADATA": "Unexpected kind of metadata",
}
assert errors == [
(kind,
messages[kind],
filename,
line),
]
else:
assert errors == []
| mpl-2.0 |
CS-SI/QGIS | python/plugins/db_manager/db_plugins/spatialite/data_model.py | 59 | 3450 | # -*- coding: utf-8 -*-
"""
/***************************************************************************
Name : DB Manager
Description : Database manager plugin for QGIS
Date : May 23, 2011
copyright : (C) 2011 by Giuseppe Sucameli
email : [email protected]
***************************************************************************/
/***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************/
"""
from qgis.core import QgsMessageLog
from ..plugin import BaseError
from ..data_model import (TableDataModel,
SqlResultModel,
SqlResultModelAsync,
SqlResultModelTask)
from .plugin import SLDatabase
class SLTableDataModel(TableDataModel):
def __init__(self, table, parent=None):
TableDataModel.__init__(self, table, parent)
fields_txt = u", ".join(self.fields)
table_txt = self.db.quoteId((self.table.schemaName(), self.table.name))
# run query and get results
sql = u"SELECT %s FROM %s" % (fields_txt, table_txt)
c = self.db._get_cursor()
self.db._execute(c, sql)
self.resdata = self.db._fetchall(c)
c.close()
del c
self.fetchedFrom = 0
self.fetchedCount = len(self.resdata)
def _sanitizeTableField(self, field):
        # for geometry columns, query the geometry type instead of the raw value
dataType = field.dataType.upper()
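        # e.g. a field.dataType of "MULTIPOLYGON25D" is reduced to "POLYGON" by
        # the prefix/suffix stripping below, so it is wrapped in GeometryType(...)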
if dataType[:5] == "MULTI":
dataType = dataType[5:]
if dataType[-3:] == "25D":
dataType = dataType[:-3]
if dataType[-10:] == "COLLECTION":
dataType = dataType[:-10]
if dataType in ["POINT", "LINESTRING", "POLYGON", "GEOMETRY"]:
return u'GeometryType(%s)' % self.db.quoteId(field.name)
return self.db.quoteId(field.name)
def rowCount(self, index=None):
return self.fetchedCount
class SLSqlResultModelTask(SqlResultModelTask):
def __init__(self, db, sql, parent):
super().__init__(db, sql, parent)
self.clone = None
def run(self):
try:
self.clone = SLDatabase(None, self.db.connector.uri())
self.model = SLSqlResultModel(self.clone, self.sql, None)
except BaseError as e:
self.error = e
QgsMessageLog.logMessage(e.msg)
return False
return True
def cancel(self):
if self.clone:
self.clone.connector.cancel()
SqlResultModelTask.cancel(self)
class SLSqlResultModelAsync(SqlResultModelAsync):
def __init__(self, db, sql, parent):
super().__init__()
self.task = SLSqlResultModelTask(db, sql, parent)
self.task.taskCompleted.connect(self.modelDone)
self.task.taskTerminated.connect(self.modelDone)
class SLSqlResultModel(SqlResultModel):
pass
| gpl-2.0 |
nwchandler/ansible | test/runner/lib/core_ci.py | 15 | 12905 | """Access Ansible Core CI remote services."""
from __future__ import absolute_import, print_function
import json
import os
import traceback
import uuid
import errno
import time
from lib.http import (
HttpClient,
HttpResponse,
HttpError,
)
from lib.util import (
ApplicationError,
run_command,
make_dirs,
display,
is_shippable,
)
from lib.config import (
EnvironmentConfig,
)
AWS_ENDPOINTS = {
'us-east-1': 'https://14blg63h2i.execute-api.us-east-1.amazonaws.com',
'us-east-2': 'https://g5xynwbk96.execute-api.us-east-2.amazonaws.com',
}
class AnsibleCoreCI(object):
"""Client for Ansible Core CI services."""
def __init__(self, args, platform, version, stage='prod', persist=True, name=None):
"""
:type args: EnvironmentConfig
:type platform: str
:type version: str
:type stage: str
:type persist: bool
:type name: str
"""
self.args = args
self.platform = platform
self.version = version
self.stage = stage
self.client = HttpClient(args)
self.connection = None
self.instance_id = None
self.name = name if name else '%s-%s' % (self.platform, self.version)
self.ci_key = os.path.expanduser('~/.ansible-core-ci.key')
aws_platforms = (
'aws',
'windows',
'freebsd',
'rhel',
'vyos',
'junos',
'ios',
)
osx_platforms = (
'osx',
)
if self.platform in aws_platforms:
if args.remote_aws_region:
# permit command-line override of region selection
region = args.remote_aws_region
# use a dedicated CI key when overriding the region selection
self.ci_key += '.%s' % args.remote_aws_region
elif is_shippable():
# split Shippable jobs across multiple regions to maximize use of launch credits
if self.platform == 'windows':
region = 'us-east-2'
else:
region = 'us-east-1'
else:
# send all non-Shippable jobs to us-east-1 to reduce api key maintenance
region = 'us-east-1'
self.endpoint = AWS_ENDPOINTS[region]
if self.platform == 'windows':
self.ssh_key = None
self.port = 5986
else:
self.ssh_key = SshKey(args)
self.port = 22
elif self.platform in osx_platforms:
self.endpoint = 'https://osx.testing.ansible.com'
self.ssh_key = SshKey(args)
self.port = None
else:
raise ApplicationError('Unsupported platform: %s' % platform)
self.path = os.path.expanduser('~/.ansible/test/instances/%s-%s' % (self.name, self.stage))
if persist and self._load():
try:
display.info('Checking existing %s/%s instance %s.' % (self.platform, self.version, self.instance_id),
verbosity=1)
self.connection = self.get(always_raise_on=[404])
display.info('Loaded existing %s/%s from: %s' % (self.platform, self.version, self._uri), verbosity=1)
except HttpError as ex:
if ex.status != 404:
raise
self._clear()
display.info('Cleared stale %s/%s instance %s.' % (self.platform, self.version, self.instance_id),
verbosity=1)
self.instance_id = None
else:
self.instance_id = None
self._clear()
if self.instance_id:
self.started = True
else:
self.started = False
self.instance_id = str(uuid.uuid4())
def start(self):
"""Start instance."""
if is_shippable():
return self.start_shippable()
return self.start_remote()
def start_remote(self):
"""Start instance for remote development/testing."""
with open(self.ci_key, 'r') as key_fd:
auth_key = key_fd.read().strip()
return self._start(dict(
remote=dict(
key=auth_key,
nonce=None,
),
))
def start_shippable(self):
"""Start instance on Shippable."""
return self._start(dict(
shippable=dict(
run_id=os.environ['SHIPPABLE_BUILD_ID'],
job_number=int(os.environ['SHIPPABLE_JOB_NUMBER']),
),
))
def stop(self):
"""Stop instance."""
if not self.started:
display.info('Skipping invalid %s/%s instance %s.' % (self.platform, self.version, self.instance_id),
verbosity=1)
return
response = self.client.delete(self._uri)
if response.status_code == 404:
self._clear()
display.info('Cleared invalid %s/%s instance %s.' % (self.platform, self.version, self.instance_id),
verbosity=1)
return
if response.status_code == 200:
self._clear()
display.info('Stopped running %s/%s instance %s.' % (self.platform, self.version, self.instance_id),
verbosity=1)
return
raise self._create_http_error(response)
def get(self, tries=2, sleep=10, always_raise_on=None):
"""
Get instance connection information.
:type tries: int
:type sleep: int
:type always_raise_on: list[int] | None
:rtype: InstanceConnection
"""
if not self.started:
display.info('Skipping invalid %s/%s instance %s.' % (self.platform, self.version, self.instance_id),
verbosity=1)
return None
if not always_raise_on:
always_raise_on = []
if self.connection and self.connection.running:
return self.connection
while True:
tries -= 1
response = self.client.get(self._uri)
if response.status_code == 200:
break
error = self._create_http_error(response)
if not tries or response.status_code in always_raise_on:
raise error
display.warning('%s. Trying again after %d seconds.' % (error, sleep))
time.sleep(sleep)
if self.args.explain:
self.connection = InstanceConnection(
running=True,
hostname='cloud.example.com',
port=self.port or 12345,
username='username',
password='password' if self.platform == 'windows' else None,
)
else:
response_json = response.json()
status = response_json['status']
con = response_json['connection']
self.connection = InstanceConnection(
running=status == 'running',
hostname=con['hostname'],
port=int(con.get('port', self.port)),
username=con['username'],
password=con.get('password'),
)
status = 'running' if self.connection.running else 'starting'
display.info('Status update: %s/%s on instance %s is %s.' %
(self.platform, self.version, self.instance_id, status),
verbosity=1)
return self.connection
def wait(self):
"""Wait for the instance to become ready."""
for _ in range(1, 90):
if self.get().running:
return
time.sleep(10)
raise ApplicationError('Timeout waiting for %s/%s instance %s.' %
(self.platform, self.version, self.instance_id))
@property
def _uri(self):
return '%s/%s/jobs/%s' % (self.endpoint, self.stage, self.instance_id)
def _start(self, auth):
"""Start instance."""
if self.started:
display.info('Skipping started %s/%s instance %s.' % (self.platform, self.version, self.instance_id),
verbosity=1)
return
display.info('Initializing new %s/%s instance %s.' % (self.platform, self.version, self.instance_id), verbosity=1)
if self.platform == 'windows':
with open('examples/scripts/ConfigureRemotingForAnsible.ps1', 'rb') as winrm_config_fd:
winrm_config = winrm_config_fd.read().decode('utf-8')
else:
winrm_config = None
data = dict(
config=dict(
platform=self.platform,
version=self.version,
public_key=self.ssh_key.pub_contents if self.ssh_key else None,
query=False,
winrm_config=winrm_config,
)
)
data.update(dict(auth=auth))
headers = {
'Content-Type': 'application/json',
}
tries = 2
sleep = 10
while True:
tries -= 1
response = self.client.put(self._uri, data=json.dumps(data), headers=headers)
if response.status_code == 200:
break
error = self._create_http_error(response)
if not tries:
raise error
display.warning('%s. Trying again after %d seconds.' % (error, sleep))
time.sleep(sleep)
self.started = True
self._save()
display.info('Started %s/%s from: %s' % (self.platform, self.version, self._uri), verbosity=1)
if self.args.explain:
return {}
return response.json()
def _clear(self):
"""Clear instance information."""
try:
self.connection = None
os.remove(self.path)
except OSError as ex:
if ex.errno != errno.ENOENT:
raise
def _load(self):
"""Load instance information."""
try:
with open(self.path, 'r') as instance_fd:
self.instance_id = instance_fd.read()
self.started = True
except IOError as ex:
if ex.errno != errno.ENOENT:
raise
self.instance_id = None
return self.instance_id
def _save(self):
"""Save instance information."""
if self.args.explain:
return
make_dirs(os.path.dirname(self.path))
with open(self.path, 'w') as instance_fd:
instance_fd.write(self.instance_id)
@staticmethod
def _create_http_error(response):
"""
:type response: HttpResponse
:rtype: ApplicationError
"""
response_json = response.json()
stack_trace = ''
if 'message' in response_json:
message = response_json['message']
elif 'errorMessage' in response_json:
message = response_json['errorMessage'].strip()
if 'stackTrace' in response_json:
trace = '\n'.join([x.rstrip() for x in traceback.format_list(response_json['stackTrace'])])
stack_trace = ('\nTraceback (from remote server):\n%s' % trace)
else:
message = str(response_json)
return HttpError(response.status_code, '%s%s' % (message, stack_trace))
class SshKey(object):
"""Container for SSH key used to connect to remote instances."""
def __init__(self, args):
"""
:type args: EnvironmentConfig
"""
tmp = os.path.expanduser('~/.ansible/test/')
self.key = os.path.join(tmp, 'id_rsa')
self.pub = os.path.join(tmp, 'id_rsa.pub')
if not os.path.isfile(self.pub):
if not args.explain:
make_dirs(tmp)
run_command(args, ['ssh-keygen', '-q', '-t', 'rsa', '-N', '', '-f', self.key])
if args.explain:
self.pub_contents = None
else:
with open(self.pub, 'r') as pub_fd:
self.pub_contents = pub_fd.read().strip()
class InstanceConnection(object):
"""Container for remote instance status and connection details."""
def __init__(self, running, hostname, port, username, password):
"""
:type running: bool
:type hostname: str
:type port: int
:type username: str
:type password: str | None
"""
self.running = running
self.hostname = hostname
self.port = port
self.username = username
self.password = password
def __str__(self):
if self.password:
return '%s:%s [%s:%s]' % (self.hostname, self.port, self.username, self.password)
return '%s:%s [%s]' % (self.hostname, self.port, self.username)
| gpl-3.0 |
outboxafrica/pimaa | PiMaa/sensors/lib/GrovePi/grove_co2_sensor/grove_co2_example.py | 2 | 1825 | #!/usr/bin/env python
#
# GrovePi Example for using the Grove - CO2 Sensor(http://www.seeedstudio.com/depot/Grove-CO2-Sensor-p-1863.html)
#
# The GrovePi connects the Raspberry Pi and Grove sensors. You can learn more about GrovePi here: http://www.dexterindustries.com/GrovePi
#
# Have a question about this example? Ask on the forums here: http://forum.dexterindustries.com/c/grovepi
#
'''
## License
The MIT License (MIT)
GrovePi for the Raspberry Pi: an open source platform for connecting Grove Sensors to the Raspberry Pi.
Copyright (C) 2015 Dexter Industries
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
'''
# Connect the CO2 sensor to the RPISER port on the GrovePi
import grove_co2_lib
import time
co2 = grove_co2_lib.CO2()
while True:
	[ppm, temp] = co2.read()
	print("CO2 Conc: %d ppm\t Temp: %d C" % (ppm, temp))
time.sleep(1)
| gpl-3.0 |
PeterBeard/project-euler | python/problem-027.py | 1 | 1633 | """
Copyright 2016 Peter Beard
Distributed under the GNU GPL v2. For full terms, see the LICENSE file.
Problem #027
Euler discovered the remarkable quadratic formula:
n² + n + 41
It turns out that the formula will produce 40 primes for the consecutive values
n = 0 to 39. However, when n = 40, 40² + 40 + 41 = 40(40 + 1) + 41 is divisible
by 41, and certainly when n = 41, 41² + 41 + 41 is clearly divisible by 41.
The incredible formula n² − 79n + 1601 was discovered, which produces 80 primes
for the consecutive values n = 0 to 79. The product of the coefficients, −79 and
1601, is −126479.
Considering quadratics of the form:
n² + an + b, where |a| < 1000 and |b| < 1000
where |n| is the modulus/absolute value of n
e.g. |11| = 11 and |−4| = 4
Find the product of the coefficients, a and b, for the quadratic expression that
produces the maximum number of primes for consecutive values of n, starting with
n = 0.
"""
from util import is_prime
def consecutive_primes(a, b):
"""
Find the number of consecutive primes generated by the formula
n² + an + b
"""
n = 0
while True:
if not is_prime(n*n + a*n + b):
return n
n += 1
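# Sanity check (informal): Euler's polynomial n² + n + 41 corresponds to
# a=1, b=41, so consecutive_primes(1, 41) should return 40.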
def solution():
max_count = 0
max_a = 0
max_b = 0
    for a in range(-999, 1000):  # |a| < 1000, strict per the problem statement
        for b in range(-999, 1000):  # |b| < 1000
c = consecutive_primes(a, b)
if max_count < c:
max_count = c
max_a = a
max_b = b
return max_a * max_b
print("The product of the a and b that produce the most primes for consecutive n is {}".format(solution()))
| gpl-2.0 |
stadelmanma/OpenPNM | OpenPNM/Geometry/models/throat_surface_area.py | 1 | 2248 | r"""
===============================================================================
Submodule -- throat_surface_area
===============================================================================
"""
import scipy as _sp
import scipy.constants  # ensure _sp.constants is loaded (not imported automatically)
def cylinder(geometry, throat_diameter='throat.diameter',
throat_length='throat.length', **kwargs):
r"""
Calculate surface area for a cylindrical throat
Parameters
----------
geometry : OpenPNM Geometry object
The object containing the geometrical properties of the throats
throat_diameter : string
Dictionary key to the throat diameter array. Default is
'throat.diameter'.
throat_length : string
Dictionary key to the throat length array. Default is 'throat.length'.
"""
D = geometry[throat_diameter]
L = geometry[throat_length]
value = _sp.constants.pi*D*L
return value
def cuboid(geometry, throat_diameter='throat.diameter',
throat_length='throat.length', **kwargs):
r"""
Calculate surface area for a cuboid throat
Parameters
----------
geometry : OpenPNM Geometry object
The object containing the geometrical properties of the throats
throat_diameter : string
Dictionary key to the throat diameter array. Default is
'throat.diameter'.
throat_length : string
Dictionary key to the throat length array. Default is 'throat.length'.
"""
D = geometry[throat_diameter]
L = geometry[throat_length]
value = 4*D*L
return value
def extrusion(geometry, throat_perimeter='throat.perimeter',
throat_length='throat.length', **kwargs):
r"""
Calculate surface area for an arbitrary shaped throat give the perimeter
and length.
Parameters
----------
geometry : OpenPNM Geometry object
The object containing the geometrical properties of the throats
throat_perimeter : string
Dictionary key to the throat perimeter array. Default is
'throat.perimeter'.
throat_length : string
Dictionary key to the throat length array. Default is 'throat.length'.
"""
P = geometry[throat_perimeter]
L = geometry[throat_length]
value = P*L
return value
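# Hedged usage sketch (assumes the OpenPNM 1.x models API, where geometry
# objects expose models.add(propname=..., model=...)):
#
#   import OpenPNM
#   from OpenPNM.Geometry import models as gm
#   pn = OpenPNM.Network.Cubic(shape=[5, 5, 5])
#   geom = OpenPNM.Geometry.GenericGeometry(network=pn, pores=pn.Ps,
#                                           throats=pn.Ts)
#   geom.models.add(propname='throat.surface_area',
#                   model=gm.throat_surface_area.cylinder)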
| mit |
wangjun/odoo | addons/purchase/wizard/purchase_line_invoice.py | 177 | 5258 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import osv
from openerp.tools.translate import _
class purchase_line_invoice(osv.osv_memory):
""" To create invoice for purchase order line"""
_name = 'purchase.order.line_invoice'
_description = 'Purchase Order Line Make Invoice'
def _make_invoice_by_partner(self, cr, uid, partner, orders, lines_ids, context=None):
"""
create a new invoice for one supplier
@param cr : Cursor
@param uid : Id of current user
@param partner : The object partner
@param orders : The set of orders to add in the invoice
        @param lines_ids : The list of invoice line ids
"""
purchase_obj = self.pool.get('purchase.order')
account_jrnl_obj = self.pool.get('account.journal')
invoice_obj = self.pool.get('account.invoice')
name = orders and orders[0].name or ''
journal_id = account_jrnl_obj\
.search(cr, uid, [('type', '=', 'purchase')], context=None)
journal_id = journal_id and journal_id[0] or False
a = partner.property_account_payable.id
inv = {
'name': name,
'origin': name,
'type': 'in_invoice',
'journal_id': journal_id,
'reference': partner.ref,
'account_id': a,
'partner_id': partner.id,
'invoice_line': [(6, 0, lines_ids)],
'currency_id': orders[0].currency_id.id,
'comment': " \n".join([order.notes for order in orders if order.notes]),
'payment_term': orders[0].payment_term_id.id,
'fiscal_position': partner.property_account_position.id
}
inv_id = invoice_obj.create(cr, uid, inv, context=context)
purchase_obj.write(cr, uid, [order.id for order in orders], {'invoice_ids': [(4, inv_id)]}, context=context)
return inv_id
def makeInvoices(self, cr, uid, ids, context=None):
"""
To get Purchase Order line and create Invoice
@param self: The object pointer.
@param cr: A database cursor
@param uid: ID of the user currently logged in
@param context: A standard dictionary
        @return : the window action displaying the created supplier invoices
"""
if context is None:
context={}
record_ids = context.get('active_ids',[])
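        # Hedged usage note: this wizard is normally launched from the purchase
        # order line list view, so the selected line ids arrive in the context,
        # e.g. self.makeInvoices(cr, uid, [], context={'active_ids': [1, 2, 3]})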
if record_ids:
res = False
invoices = {}
purchase_obj = self.pool.get('purchase.order')
purchase_line_obj = self.pool.get('purchase.order.line')
invoice_line_obj = self.pool.get('account.invoice.line')
for line in purchase_line_obj.browse(cr, uid, record_ids, context=context):
if (not line.invoiced) and (line.state not in ('draft', 'cancel')):
if not line.partner_id.id in invoices:
invoices[line.partner_id.id] = []
acc_id = purchase_obj._choose_account_from_po_line(cr, uid, line, context=context)
inv_line_data = purchase_obj._prepare_inv_line(cr, uid, acc_id, line, context=context)
inv_line_data.update({'origin': line.order_id.name})
inv_id = invoice_line_obj.create(cr, uid, inv_line_data, context=context)
purchase_line_obj.write(cr, uid, [line.id], {'invoiced': True, 'invoice_lines': [(4, inv_id)]})
invoices[line.partner_id.id].append((line,inv_id))
res = []
for result in invoices.values():
il = map(lambda x: x[1], result)
orders = list(set(map(lambda x : x[0].order_id, result)))
res.append(self._make_invoice_by_partner(cr, uid, orders[0].partner_id, orders, il, context=context))
return {
'domain': "[('id','in', ["+','.join(map(str,res))+"])]",
'name': _('Supplier Invoices'),
'view_type': 'form',
'view_mode': 'tree,form',
'res_model': 'account.invoice',
'view_id': False,
'context': "{'type':'in_invoice', 'journal_type': 'purchase'}",
'type': 'ir.actions.act_window'
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
SeanWaclawik/open_source | lab6/markdown.py | 1 | 1813 | """
Markdown.py
0. just print whatever is passed in to stdin
0. if filename passed in as a command line parameter,
then print file instead of stdin
1. wrap input in paragraph tags
2. convert single asterisk or underscore pairs to em tags
3. convert double asterisk or underscore pairs to strong tags
4. convert `#` => `<h1>` and `</h1>`
`##` => `<h2>` and `</h2>`
`###` => `<h3>` and `</h3>`
5. convert `>` => `<blockquote>` and on the next line with no `>` end it with `</blockquote>`
"""
import fileinput
import re
indent = False
def convertStrong(line):
line = re.sub(r'\*\*(.*)\*\*', r'<strong>\1</strong>', line)
line = re.sub(r'__(.*)__', r'<strong>\1</strong>', line)
return line
def convertEm(line):
line = re.sub(r'\*(.*)\*', r'<em>\1</em>', line)
line = re.sub(r'_(.*)_', r'<em>\1</em>', line)
return line
def convertHeading(line):
line = re.sub(r'###(.*)', r'<h3>\1</h3>', line)
line = re.sub(r'##(.*)', r'<h2>\1</h2>', line)
line = re.sub(r'#(.*)', r'<h1>\1</h1>', line)
return line
def convertBlock(line):
line = re.sub(r'>(.*)', r'\1', line)
return line
def convertAll(line):
line = line.rstrip()
line = convertStrong(line)
line = convertEm(line)
line = convertHeading(line)
return line
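# Quick sanity check (informal): convertAll("# **Title**") yields
# "<h1> <strong>Title</strong></h1>" (the space after '#' is captured too).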
for line in fileinput.input():
if ('>' in line and indent == False):
line = convertBlock(line)
line = '<blockquote><p>' + convertAll(line) + '</p>'
indent = True
elif ('>' in line and indent == True):
line = convertBlock(line)
line = '<p>' + convertAll(line) + '</p>'
elif (not '>' in line and indent == True):
indent = False
line = '</blockquote><p>' + convertAll(line) + '</p>'
    else:  # not a blockquote line: wrap it in plain <p> tags
line = convertAll(line)
line = '<p>' + line + '</p>'
print line,
| mit |
aam-at/tensorflow | tensorflow/compiler/tests/extract_image_patches_op_test.py | 25 | 4754 | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Functional tests for ExtractImagePatches op."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.compiler.tests import xla_test
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import array_ops
from tensorflow.python.platform import test
class ExtractImagePatches(xla_test.XLATestCase):
"""Functional tests for ExtractImagePatches op."""
def _VerifyValues(self, image, ksizes, strides, rates, padding, patches):
"""Tests input-output pairs for the ExtractImagePatches op.
Args:
image: Input tensor with shape: [batch, in_rows, in_cols, depth].
ksizes: Patch size specified as: [ksize_rows, ksize_cols].
strides: Output strides, specified as [stride_rows, stride_cols].
rates: Atrous rates, specified as [rate_rows, rate_cols].
padding: Padding type.
patches: Expected output.
"""
ksizes = [1] + ksizes + [1]
strides = [1] + strides + [1]
rates = [1] + rates + [1]
with self.session():
image_placeholder = array_ops.placeholder(dtypes.float32)
with self.test_scope():
out_tensor = array_ops.extract_image_patches(
image_placeholder,
ksizes=ksizes,
strides=strides,
rates=rates,
padding=padding,
name="im2col")
feed_dict = {image_placeholder: image}
self.assertAllClose(patches, out_tensor.eval(feed_dict=feed_dict))
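  # Note (hedged): extract_image_patches flattens each ksize_rows x ksize_cols
  # patch (times depth) into the last output dimension, which is why a 2x2
  # patch over depth-1 input yields 4 values per output position below.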
def testKsize1x1Stride1x1Rate1x1(self):
"""Verifies that for 1x1 kernel the output equals the input."""
# [2, 3, 4, 5]
image = np.reshape(range(120), [2, 3, 4, 5])
# [2, 3, 4, 5]
patches = np.reshape(range(120), [2, 3, 4, 5])
for padding in ["VALID", "SAME"]:
self._VerifyValues(
image,
ksizes=[1, 1],
strides=[1, 1],
rates=[1, 1],
padding=padding,
patches=patches)
def testKsize1x1Stride2x3Rate1x1(self):
"""Test for 1x1 kernel and strides."""
# [2, 4, 5, 3]
image = np.reshape(range(120), [2, 4, 5, 3])
# [2, 2, 2, 3]
patches = image[:, ::2, ::3, :]
for padding in ["VALID", "SAME"]:
self._VerifyValues(
image,
ksizes=[1, 1],
strides=[2, 3],
rates=[1, 1],
padding=padding,
patches=patches)
def testKsize2x2Stride1x1Rate1x1Valid(self):
"""Test for 2x2 kernel with VALID padding."""
# [1, 2, 2, 1]
image = [[[[1], [2]], [[3], [4]]]]
# [1, 1, 1, 4]
patches = [[[[1, 2, 3, 4]]]]
self._VerifyValues(
image,
ksizes=[2, 2],
strides=[1, 1],
rates=[1, 1],
padding="VALID",
patches=patches)
def testKsize2x2Stride1x1Rate1x1Same(self):
"""Test for 2x2 kernel with SAME padding."""
# [1, 2, 2, 1]
image = [[[[1], [2]], [[3], [4]]]]
# [1, 2, 2, 4]
patches = [[[[1, 2, 3, 4], [2, 0, 4, 0]], [[3, 4, 0, 0], [4, 0, 0, 0]]]]
self._VerifyValues(
image,
ksizes=[2, 2],
strides=[1, 1],
rates=[1, 1],
padding="SAME",
patches=patches)
def testKsize2x2Stride1x1Rate2x2Valid(self):
"""Test for 2x2 kernel with 2x2 dilation."""
# [1, 2, 2, 1]
image = np.arange(16).reshape(1, 4, 4, 1).astype(np.float32)
# [1, 2, 2, 4]
patches = [[[[0, 2, 8, 10], [1, 3, 9, 11]],
[[4, 6, 12, 14], [5, 7, 13, 15]]]]
self._VerifyValues(
image,
ksizes=[2, 2],
strides=[1, 1],
rates=[2, 2],
padding="VALID",
patches=patches)
def testKsize2x2Stride1x1Rate1x1ValidDepth2(self):
"""Test for 2x2 kernel with VALID padding."""
# [1, 2, 2, 2]
image = [[[[1, 5], [2, 6]], [[3, 7], [4, 8]]]]
# [1, 1, 1, 8]
patches = [[[[1, 5, 2, 6, 3, 7, 4, 8]]]]
self._VerifyValues(
image,
ksizes=[2, 2],
strides=[1, 1],
rates=[1, 1],
padding="VALID",
patches=patches)
if __name__ == "__main__":
test.main()
| apache-2.0 |
mihailignatenko/erp | openerp/report/int_to_text.py | 442 | 2641 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
unites = {
0: '', 1:'un', 2:'deux', 3:'trois', 4:'quatre', 5:'cinq', 6:'six', 7:'sept', 8:'huit', 9:'neuf',
10:'dix', 11:'onze', 12:'douze', 13:'treize', 14:'quatorze', 15:'quinze', 16:'seize',
21:'vingt et un', 31:'trente et un', 41:'quarante et un', 51:'cinquante et un', 61:'soixante et un',
71:'septante et un', 91:'nonante et un', 80:'quatre-vingts'
}
dizaine = {
1: 'dix', 2:'vingt', 3:'trente',4:'quarante', 5:'cinquante', 6:'soixante', 7:'septante', 8:'quatre-vingt', 9:'nonante'
}
centaine = {
0:'', 1: 'cent', 2:'deux cent', 3:'trois cent',4:'quatre cent', 5:'cinq cent', 6:'six cent', 7:'sept cent', 8:'huit cent', 9:'neuf cent'
}
mille = {
0:'', 1:'mille'
}
def _100_to_text(chiffre):
if chiffre in unites:
return unites[chiffre]
else:
if chiffre%10>0:
return dizaine[chiffre / 10]+'-'+unites[chiffre % 10]
else:
return dizaine[chiffre / 10]
def _1000_to_text(chiffre):
d = _100_to_text(chiffre % 100)
d2 = chiffre/100
if d2>0 and d:
return centaine[d2]+' '+d
elif d2>1 and not d:
return centaine[d2]+'s'
else:
return centaine[d2] or d
def _10000_to_text(chiffre):
if chiffre==0:
return 'zero'
part1 = _1000_to_text(chiffre % 1000)
part2 = mille.get(chiffre / 1000, _1000_to_text(chiffre / 1000)+' mille')
if part2 and part1:
part1 = ' '+part1
return part2+part1
def int_to_text(i):
return _10000_to_text(i)
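# Example (derived from the tables above): int_to_text(1234) returns
# 'mille deux cent trente-quatre'.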
if __name__=='__main__':
for i in range(1,999999,139):
print int_to_text(i)
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
strint/tensorflow | tensorflow/contrib/metrics/python/metrics/classification.py | 111 | 2647 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Classification metrics library."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
# TODO(nsilberman): move into metrics/python/ops/
def accuracy(predictions, labels, weights=None, name=None):
"""Computes the percentage of times that predictions matches labels.
Args:
predictions: the predicted values, a `Tensor` whose dtype and shape
matches 'labels'.
labels: the ground truth values, a `Tensor` of any shape and
bool, integer, or string dtype.
weights: None or `Tensor` of float values to reweight the accuracy.
name: A name for the operation (optional).
Returns:
Accuracy `Tensor`.
Raises:
ValueError: if dtypes don't match or
if dtype is not bool, integer, or string.
"""
if not (labels.dtype.is_integer or
labels.dtype in (dtypes.bool, dtypes.string)):
raise ValueError(
'Labels should have bool, integer, or string dtype, not %r' %
labels.dtype)
if not labels.dtype.is_compatible_with(predictions.dtype):
raise ValueError('Dtypes of predictions and labels should match. '
'Given: predictions (%r) and labels (%r)' %
(predictions.dtype, labels.dtype))
with ops.name_scope(name, 'accuracy', values=[predictions, labels]):
is_correct = math_ops.cast(
math_ops.equal(predictions, labels), dtypes.float32)
if weights is not None:
is_correct = math_ops.multiply(is_correct, weights)
num_values = math_ops.multiply(weights, array_ops.ones_like(is_correct))
return math_ops.div(math_ops.reduce_sum(is_correct),
math_ops.reduce_sum(num_values))
return math_ops.reduce_mean(is_correct)
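# Example sketch (hedged): with int32 tensors predictions=[1, 0, 1] and
# labels=[1, 1, 1] and weights=None, the result is the mean of [1., 0., 1.],
# i.e. ~0.667.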
| apache-2.0 |
littlstar/chromium.src | third_party/pexpect/pexpect.py | 173 | 77354 | """Pexpect is a Python module for spawning child applications and controlling
them automatically. Pexpect can be used for automating interactive applications
such as ssh, ftp, passwd, telnet, etc. It can be used to automate setup
scripts for duplicating software package installations on different servers. It
can be used for automated software testing. Pexpect is in the spirit of Don
Libes' Expect, but Pexpect is pure Python. Other Expect-like modules for Python
require TCL and Expect or require C extensions to be compiled. Pexpect does not
use C, Expect, or TCL extensions. It should work on any platform that supports
the standard Python pty module. The Pexpect interface focuses on ease of use so
that simple tasks are easy.
There are two main interfaces to the Pexpect system; these are the function,
run() and the class, spawn. The spawn class is more powerful. The run()
function is simpler than spawn, and is good for quickly calling a program. When
you call the run() function it executes a given program and then returns the
output. This is a handy replacement for os.system().
For example::
pexpect.run('ls -la')
The spawn class is the more powerful interface to the Pexpect system. You can
use this to spawn a child program then interact with it by sending input and
expecting responses (waiting for patterns in the child's output).
For example::
child = pexpect.spawn('scp foo [email protected]:.')
child.expect('Password:')
child.sendline(mypassword)
This works even for commands that ask for passwords or other input outside of
the normal stdio streams. For example, ssh reads input directly from the TTY
device which bypasses stdin.
Credits: Noah Spurrier, Richard Holden, Marco Molteni, Kimberley Burchett,
Robert Stone, Hartmut Goebel, Chad Schroeder, Erick Tryzelaar, Dave Kirby, Ids
vander Molen, George Todd, Noel Taylor, Nicolas D. Cesar, Alexander Gattin,
Jacques-Etienne Baudoux, Geoffrey Marshall, Francisco Lourenco, Glen Mabey,
Karthik Gurusamy, Fernando Perez, Corey Minyard, Jon Cohen, Guillaume
Chazarain, Andrew Ryan, Nick Craig-Wood, Andrew Stone, Jorgen Grahn, John
Spiegel, Jan Grant, and Shane Kerr. Let me know if I forgot anyone.
Pexpect is free, open source, and all that good stuff.
http://pexpect.sourceforge.net/
PEXPECT LICENSE
This license is approved by the OSI and FSF as GPL-compatible.
http://opensource.org/licenses/isc-license.txt
Copyright (c) 2012, Noah Spurrier <[email protected]>
PERMISSION TO USE, COPY, MODIFY, AND/OR DISTRIBUTE THIS SOFTWARE FOR ANY
PURPOSE WITH OR WITHOUT FEE IS HEREBY GRANTED, PROVIDED THAT THE ABOVE
COPYRIGHT NOTICE AND THIS PERMISSION NOTICE APPEAR IN ALL COPIES.
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
"""
try:
import os
import sys
import time
import select
import string
import re
import struct
import resource
import types
import pty
import tty
import termios
import fcntl
import errno
import traceback
import signal
except ImportError as e:
raise ImportError(str(e) + """
A critical module was not found. Probably this operating system does not
support it. Pexpect is intended for UNIX-like operating systems.""")
__version__ = '2.6'
__revision__ = '1'
__all__ = ['ExceptionPexpect', 'EOF', 'TIMEOUT', 'spawn', 'run', 'which',
'split_command_line', '__version__', '__revision__']
# Exception classes used by this module.
class ExceptionPexpect(Exception):
"""Base class for all exceptions raised by this module.
"""
def __init__(self, value):
self.value = value
def __str__(self):
return str(self.value)
def get_trace(self):
"""This returns an abbreviated stack trace with lines that only concern
the caller. In other words, the stack trace inside the Pexpect module
is not included. """
tblist = traceback.extract_tb(sys.exc_info()[2])
#tblist = filter(self.__filter_not_pexpect, tblist)
tblist = [item for item in tblist if self.__filter_not_pexpect(item)]
tblist = traceback.format_list(tblist)
return ''.join(tblist)
def __filter_not_pexpect(self, trace_list_item):
"""This returns True if list item 0 the string 'pexpect.py' in it. """
if trace_list_item[0].find('pexpect.py') == -1:
return True
else:
return False
class EOF(ExceptionPexpect):
"""Raised when EOF is read from a child.
This usually means the child has exited."""
class TIMEOUT(ExceptionPexpect):
"""Raised when a read time exceeds the timeout. """
##class TIMEOUT_PATTERN(TIMEOUT):
## """Raised when the pattern match time exceeds the timeout.
## This is different than a read TIMEOUT because the child process may
## give output, thus never give a TIMEOUT, but the output
## may never match a pattern.
## """
##class MAXBUFFER(ExceptionPexpect):
## """Raised when a buffer fills before matching an expected pattern."""
def run(command, timeout=-1, withexitstatus=False, events=None,
extra_args=None, logfile=None, cwd=None, env=None):
"""
This function runs the given command; waits for it to finish; then
returns all output as a string. STDERR is included in output. If the full
path to the command is not given then the path is searched.
Note that lines are terminated by CR/LF (\\r\\n) combination even on
UNIX-like systems because this is the standard for pseudottys. If you set
'withexitstatus' to true, then run will return a tuple of (command_output,
exitstatus). If 'withexitstatus' is false then this returns just
command_output.
The run() function can often be used instead of creating a spawn instance.
For example, the following code uses spawn::
from pexpect import *
child = spawn('scp foo [email protected]:.')
child.expect('(?i)password')
child.sendline(mypassword)
    The previous code can be replaced with the following::
from pexpect import *
run('scp foo [email protected]:.', events={'(?i)password': mypassword})
Examples
========
Start the apache daemon on the local machine::
from pexpect import *
run("/usr/local/apache/bin/apachectl start")
Check in a file using SVN::
from pexpect import *
run("svn ci -m 'automatic commit' my_file.py")
Run a command and capture exit status::
from pexpect import *
(command_output, exitstatus) = run('ls -l /bin', withexitstatus=1)
Tricky Examples
===============
The following will run SSH and execute 'ls -l' on the remote machine. The
password 'secret' will be sent if the '(?i)password' pattern is ever seen::
run("ssh [email protected] 'ls -l'",
events={'(?i)password':'secret\\n'})
This will start mencoder to rip a video from DVD. This will also display
progress ticks every 5 seconds as it runs. For example::
from pexpect import *
def print_ticks(d):
print d['event_count'],
run("mencoder dvd://1 -o video.avi -oac copy -ovc copy",
events={TIMEOUT:print_ticks}, timeout=5)
The 'events' argument should be a dictionary of patterns and responses.
    Whenever one of the patterns is seen in the command output, run() will send the
associated response string. Note that you should put newlines in your
string if Enter is necessary. The responses may also contain callback
    functions. Any callback is a function that takes a dictionary as an argument.
The dictionary contains all the locals from the run() function, so you can
access the child spawn object or any other variable defined in run()
(event_count, child, and extra_args are the most useful). A callback may
    return True to stop the current run process; otherwise run() continues until
    the next event. A callback may also return a string which will be sent to
    the child. 'extra_args' is not used directly by run(). It provides a way to
    pass data to a callback function through run(), via the locals
    dictionary passed to the callback. """
if timeout == -1:
child = spawn(command, maxread=2000, logfile=logfile, cwd=cwd, env=env)
else:
child = spawn(command, timeout=timeout, maxread=2000, logfile=logfile,
cwd=cwd, env=env)
if events is not None:
patterns = list(events.keys())
responses = list(events.values())
else:
# This assumes EOF or TIMEOUT will eventually cause run to terminate.
patterns = None
responses = None
child_result_list = []
event_count = 0
while True:
try:
index = child.expect(patterns)
if type(child.after) in types.StringTypes:
child_result_list.append(child.before + child.after)
else:
# child.after may have been a TIMEOUT or EOF,
# which we don't want appended to the list.
child_result_list.append(child.before)
if type(responses[index]) in types.StringTypes:
child.send(responses[index])
elif isinstance(responses[index], types.FunctionType):
callback_result = responses[index](locals())
sys.stdout.flush()
if type(callback_result) in types.StringTypes:
child.send(callback_result)
elif callback_result:
break
else:
raise TypeError('The callback must be a string or function.')
event_count = event_count + 1
except TIMEOUT as e:
child_result_list.append(child.before)
break
except EOF as e:
child_result_list.append(child.before)
break
child_result = ''.join(child_result_list)
if withexitstatus:
child.close()
return (child_result, child.exitstatus)
else:
return child_result
class spawn(object):
"""This is the main class interface for Pexpect. Use this class to start
and control child applications. """
def __init__(self, command, args=[], timeout=30, maxread=2000,
searchwindowsize=None, logfile=None, cwd=None, env=None):
"""This is the constructor. The command parameter may be a string that
includes a command and any arguments to the command. For example::
child = pexpect.spawn('/usr/bin/ftp')
child = pexpect.spawn('/usr/bin/ssh [email protected]')
child = pexpect.spawn('ls -latr /tmp')
You may also construct it with a list of arguments like so::
child = pexpect.spawn('/usr/bin/ftp', [])
child = pexpect.spawn('/usr/bin/ssh', ['[email protected]'])
child = pexpect.spawn('ls', ['-latr', '/tmp'])
After this the child application will be created and will be ready to
talk to. For normal use, see expect() and send() and sendline().
Remember that Pexpect does NOT interpret shell meta characters such as
redirect, pipe, or wild cards (>, |, or *). This is a common mistake.
If you want to run a command and pipe it through another command then
you must also start a shell. For example::
child = pexpect.spawn('/bin/bash -c "ls -l | grep LOG > logs.txt"')
child.expect(pexpect.EOF)
The second form of spawn (where you pass a list of arguments) is useful
in situations where you wish to spawn a command and pass it its own
argument list. This can make syntax more clear. For example, the
following is equivalent to the previous example::
shell_cmd = 'ls -l | grep LOG > logs.txt'
child = pexpect.spawn('/bin/bash', ['-c', shell_cmd])
child.expect(pexpect.EOF)
        The maxread attribute sets the read buffer size. This is the maximum number
of bytes that Pexpect will try to read from a TTY at one time. Setting
the maxread size to 1 will turn off buffering. Setting the maxread
value higher may help performance in cases where large amounts of
output are read back from the child. This feature is useful in
conjunction with searchwindowsize.
        The searchwindowsize attribute sets how far back in the incoming
        search buffer Pexpect will search for pattern matches. Every time
        Pexpect reads some data from the child it will append the data to the
        incoming buffer. The default is to search from the beginning of the
        incoming buffer each time new data is read from the child. But this is
        very inefficient if you are running a command that generates a large
        amount of data and you only need to match near the end of it. The
        searchwindowsize does not affect the size of the incoming data buffer.
        You will still have access to the full buffer after expect() returns.
The logfile member turns on or off logging. All input and output will
be copied to the given file object. Set logfile to None to stop
logging. This is the default. Set logfile to sys.stdout to echo
everything to standard output. The logfile is flushed after each write.
Example log input and output to a file::
child = pexpect.spawn('some_command')
fout = file('mylog.txt','w')
child.logfile = fout
Example log to stdout::
child = pexpect.spawn('some_command')
child.logfile = sys.stdout
The logfile_read and logfile_send members can be used to separately log
the input from the child and output sent to the child. Sometimes you
don't want to see everything you write to the child. You only want to
log what the child sends back. For example::
child = pexpect.spawn('some_command')
child.logfile_read = sys.stdout
To separately log output sent to the child use logfile_send::
self.logfile_send = fout
The delaybeforesend helps overcome a weird behavior that many users
were experiencing. The typical problem was that a user would expect() a
"Password:" prompt and then immediately call sendline() to send the
password. The user would then see that their password was echoed back
to them. Passwords don't normally echo. The problem is caused by the
fact that most applications print out the "Password" prompt and then
turn off stdin echo, but if you send your password before the
application turned off echo, then you get your password echoed.
Normally this wouldn't be a problem when interacting with a human at a
real keyboard. If you introduce a slight delay just before writing then
this seems to clear up the problem. This was such a common problem for
many users that I decided that the default pexpect behavior should be
to sleep just before writing to the child application. 1/20th of a
second (50 ms) seems to be enough to clear up the problem. You can set
delaybeforesend to 0 to return to the old behavior. Most Linux machines
don't like this to be below 0.03. I don't know why.
Note that spawn is clever about finding commands on your path.
It uses the same logic that "which" uses to find executables.
If you wish to get the exit status of the child you must call the
close() method. The exit or signal status of the child will be stored
in self.exitstatus or self.signalstatus. If the child exited normally
then exitstatus will store the exit return code and signalstatus will
be None. If the child was terminated abnormally with a signal then
signalstatus will store the signal value and exitstatus will be None.
If you need more detail you can also read the self.status member which
stores the status returned by os.waitpid. You can interpret this using
        os.WIFEXITED/os.WEXITSTATUS or os.WIFSIGNALED/os.WTERMSIG. """
self.STDIN_FILENO = pty.STDIN_FILENO
self.STDOUT_FILENO = pty.STDOUT_FILENO
self.STDERR_FILENO = pty.STDERR_FILENO
self.stdin = sys.stdin
self.stdout = sys.stdout
self.stderr = sys.stderr
self.searcher = None
self.ignorecase = False
self.before = None
self.after = None
self.match = None
self.match_index = None
self.terminated = True
self.exitstatus = None
self.signalstatus = None
# status returned by os.waitpid
self.status = None
self.flag_eof = False
self.pid = None
        # the child file descriptor is initially closed
self.child_fd = -1
self.timeout = timeout
self.delimiter = EOF
self.logfile = logfile
# input from child (read_nonblocking)
self.logfile_read = None
# output to send (send, sendline)
self.logfile_send = None
# max bytes to read at one time into buffer
self.maxread = maxread
# This is the read buffer. See maxread.
self.buffer = ''
# Data before searchwindowsize point is preserved, but not searched.
self.searchwindowsize = searchwindowsize
# Delay used before sending data to child. Time in seconds.
# Most Linux machines don't like this to be below 0.03 (30 ms).
self.delaybeforesend = 0.05
# Used by close() to give kernel time to update process status.
# Time in seconds.
self.delayafterclose = 0.1
# Used by terminate() to give kernel time to update process status.
# Time in seconds.
self.delayafterterminate = 0.1
self.softspace = False
self.name = '<' + repr(self) + '>'
self.encoding = None
self.closed = True
self.cwd = cwd
self.env = env
# This flags if we are running on irix
self.__irix_hack = (sys.platform.lower().find('irix') >= 0)
# Solaris uses internal __fork_pty(). All others use pty.fork().
if ((sys.platform.lower().find('solaris') >= 0)
or (sys.platform.lower().find('sunos5') >= 0)):
self.use_native_pty_fork = False
else:
self.use_native_pty_fork = True
# Support subclasses that do not use command or args.
if command is None:
self.command = None
self.args = None
self.name = '<pexpect factory incomplete>'
else:
self._spawn(command, args)
def __del__(self):
"""This makes sure that no system resources are left open. Python only
garbage collects Python objects. OS file descriptors are not Python
objects, so they must be handled explicitly. If the child file
descriptor was opened outside of this class (passed to the constructor)
then this does not close it. """
if not self.closed:
# It is possible for __del__ methods to execute during the
# teardown of the Python VM itself. Thus self.close() may
# trigger an exception because os.close may be None.
# -- Fernando Perez
try:
self.close()
except:
pass
def __str__(self):
"""This returns a human-readable string that represents the state of
the object. """
s = []
s.append(repr(self))
s.append('version: ' + __version__ + ' (' + __revision__ + ')')
s.append('command: ' + str(self.command))
s.append('args: ' + str(self.args))
s.append('searcher: ' + str(self.searcher))
s.append('buffer (last 100 chars): ' + str(self.buffer)[-100:])
s.append('before (last 100 chars): ' + str(self.before)[-100:])
s.append('after: ' + str(self.after))
s.append('match: ' + str(self.match))
s.append('match_index: ' + str(self.match_index))
s.append('exitstatus: ' + str(self.exitstatus))
s.append('flag_eof: ' + str(self.flag_eof))
s.append('pid: ' + str(self.pid))
s.append('child_fd: ' + str(self.child_fd))
s.append('closed: ' + str(self.closed))
s.append('timeout: ' + str(self.timeout))
s.append('delimiter: ' + str(self.delimiter))
s.append('logfile: ' + str(self.logfile))
s.append('logfile_read: ' + str(self.logfile_read))
s.append('logfile_send: ' + str(self.logfile_send))
s.append('maxread: ' + str(self.maxread))
s.append('ignorecase: ' + str(self.ignorecase))
s.append('searchwindowsize: ' + str(self.searchwindowsize))
s.append('delaybeforesend: ' + str(self.delaybeforesend))
s.append('delayafterclose: ' + str(self.delayafterclose))
s.append('delayafterterminate: ' + str(self.delayafterterminate))
return '\n'.join(s)
def _spawn(self, command, args=[]):
"""This starts the given command in a child process. This does all the
fork/exec type of stuff for a pty. This is called by __init__. If args
is empty then command will be parsed (split on spaces) and args will be
set to parsed arguments. """
# The pid and child_fd of this object get set by this method.
# Note that it is difficult for this method to fail.
# You cannot detect if the child process cannot start.
# So the only way you can tell if the child process started
# or not is to try to read from the file descriptor. If you get
# EOF immediately then it means that the child is already dead.
# That may not necessarily be bad because you may have spawned a child
# that performs some task; creates no stdout output; and then dies.
# If command is an int type then it may represent a file descriptor.
if isinstance(command, int):
raise ExceptionPexpect('Command is an int type. ' +
'If this is a file descriptor then maybe you want to ' +
'use fdpexpect.fdspawn which takes an existing ' +
'file descriptor instead of a command string.')
if not isinstance(args, list):
raise TypeError('The argument, args, must be a list.')
if args == []:
self.args = split_command_line(command)
self.command = self.args[0]
else:
# Make a shallow copy of the args list.
self.args = args[:]
self.args.insert(0, command)
self.command = command
command_with_path = which(self.command)
if command_with_path is None:
raise ExceptionPexpect('The command was not found or was not ' +
'executable: %s.' % self.command)
self.command = command_with_path
self.args[0] = self.command
self.name = '<' + ' '.join(self.args) + '>'
assert self.pid is None, 'The pid member must be None.'
assert self.command is not None, 'The command member must not be None.'
if self.use_native_pty_fork:
try:
self.pid, self.child_fd = pty.fork()
except OSError as e:
raise ExceptionPexpect('pty.fork() failed: ' + str(e))
else:
# Use internal __fork_pty
self.pid, self.child_fd = self.__fork_pty()
if self.pid == 0:
# Child
try:
# used by setwinsize()
self.child_fd = sys.stdout.fileno()
self.setwinsize(24, 80)
except:
# Some platforms do not like setwinsize (Cygwin).
# This will cause problem when running applications that
# are very picky about window size.
# This is a serious limitation, but not a show stopper.
pass
# Do not allow child to inherit open file descriptors from parent.
max_fd = resource.getrlimit(resource.RLIMIT_NOFILE)[0]
for i in range(3, max_fd):
try:
os.close(i)
except OSError:
pass
# I don't know why this works, but ignoring SIGHUP fixes a
# problem when trying to start a Java daemon with sudo
# (specifically, Tomcat).
signal.signal(signal.SIGHUP, signal.SIG_IGN)
if self.cwd is not None:
os.chdir(self.cwd)
if self.env is None:
os.execv(self.command, self.args)
else:
os.execvpe(self.command, self.args, self.env)
# Parent
self.terminated = False
self.closed = False
def __fork_pty(self):
"""This implements a substitute for the forkpty system call. This
should be more portable than the pty.fork() function. Specifically,
this should work on Solaris.
Modified 10.06.05 by Geoff Marshall: Implemented __fork_pty() method to
resolve the issue with Python's pty.fork() not supporting Solaris,
particularly ssh. Based on patch to posixmodule.c authored by Noah
Spurrier::
http://mail.python.org/pipermail/python-dev/2003-May/035281.html
"""
parent_fd, child_fd = os.openpty()
if parent_fd < 0 or child_fd < 0:
raise ExceptionPexpect("Could not open with os.openpty().")
pid = os.fork()
if pid < 0:
raise ExceptionPexpect("Failed os.fork().")
elif pid == 0:
# Child.
os.close(parent_fd)
self.__pty_make_controlling_tty(child_fd)
os.dup2(child_fd, 0)
os.dup2(child_fd, 1)
os.dup2(child_fd, 2)
if child_fd > 2:
os.close(child_fd)
else:
# Parent.
os.close(child_fd)
return pid, parent_fd
def __pty_make_controlling_tty(self, tty_fd):
"""This makes the pseudo-terminal the controlling tty. This should be
more portable than the pty.fork() function. Specifically, this should
work on Solaris. """
child_name = os.ttyname(tty_fd)
# Disconnect from controlling tty. Harmless if not already connected.
try:
fd = os.open("/dev/tty", os.O_RDWR | os.O_NOCTTY)
if fd >= 0:
os.close(fd)
except:
# Already disconnected. This happens if running inside cron.
pass
os.setsid()
# Verify we are disconnected from controlling tty
# by attempting to open it again.
try:
fd = os.open("/dev/tty", os.O_RDWR | os.O_NOCTTY)
if fd >= 0:
os.close(fd)
raise ExceptionPexpect('Failed to disconnect from ' +
'controlling tty. It is still possible to open /dev/tty.')
except:
# Good! We are disconnected from a controlling tty.
pass
# Verify we can open child pty.
fd = os.open(child_name, os.O_RDWR)
if fd < 0:
raise ExceptionPexpect("Could not open child pty, " + child_name)
else:
os.close(fd)
# Verify we now have a controlling tty.
fd = os.open("/dev/tty", os.O_WRONLY)
if fd < 0:
raise ExceptionPexpect("Could not open controlling tty, /dev/tty")
else:
os.close(fd)
def fileno(self):
"""This returns the file descriptor of the pty for the child.
"""
return self.child_fd
def close(self, force=True):
"""This closes the connection with the child application. Note that
calling close() more than once is valid. This emulates standard Python
behavior with files. Set force to True if you want to make sure that
the child is terminated (SIGKILL is sent if the child ignores SIGHUP
and SIGINT). """
if not self.closed:
self.flush()
os.close(self.child_fd)
# Give kernel time to update process status.
time.sleep(self.delayafterclose)
if self.isalive():
if not self.terminate(force):
raise ExceptionPexpect('Could not terminate the child.')
self.child_fd = -1
self.closed = True
#self.pid = None
def flush(self):
"""This does nothing. It is here to support the interface for a
File-like object. """
pass
def isatty(self):
"""This returns True if the file descriptor is open and connected to a
tty(-like) device, else False. """
return os.isatty(self.child_fd)
def waitnoecho(self, timeout=-1):
"""This waits until the terminal ECHO flag is set False. This returns
True if the echo mode is off. This returns False if the ECHO flag was
not set False before the timeout. This can be used to detect when the
child is waiting for a password. Usually a child application will turn
off echo mode when it is waiting for the user to enter a password. For
example, instead of expecting the "password:" prompt you can wait for
the child to set ECHO off::
p = pexpect.spawn('ssh [email protected]')
p.waitnoecho()
p.sendline(mypassword)
If timeout==-1 then this method will use the value in self.timeout.
If timeout==None then this method will block until the ECHO flag is False.
"""
if timeout == -1:
timeout = self.timeout
if timeout is not None:
end_time = time.time() + timeout
while True:
if not self.getecho():
return True
if timeout is not None and timeout < 0:
return False
if timeout is not None:
timeout = end_time - time.time()
time.sleep(0.1)
def getecho(self):
"""This returns the terminal echo mode. This returns True if echo is
on or False if echo is off. Child applications that are expecting you
to enter a password often set ECHO False. See waitnoecho(). """
attr = termios.tcgetattr(self.child_fd)
if attr[3] & termios.ECHO:
return True
return False
def setecho(self, state):
"""This sets the terminal echo mode on or off. Note that anything the
child sent before the echo will be lost, so you should be sure that
your input buffer is empty before you call setecho(). For example, the
following will work as expected::
p = pexpect.spawn('cat') # Echo is on by default.
p.sendline('1234') # We expect to see this twice from the child...
p.expect(['1234']) # ... once from the tty echo...
p.expect(['1234']) # ... and again from cat itself.
p.setecho(False) # Turn off tty echo
p.sendline('abcd') # We will see this only once (echoed by cat).
p.sendline('wxyz') # We will see this only once (echoed by cat).
p.expect(['abcd'])
p.expect(['wxyz'])
The following WILL NOT WORK because the lines sent before the setecho
will be lost::
p = pexpect.spawn('cat')
p.sendline('1234')
p.setecho(False) # Turn off tty echo
p.sendline('abcd') # We will see this only once (echoed by cat).
p.sendline('wxyz') # We will see this only once (echoed by cat).
p.expect(['1234'])
p.expect(['1234'])
p.expect(['abcd'])
p.expect(['wxyz'])
"""
attr = termios.tcgetattr(self.child_fd)
if state:
attr[3] = attr[3] | termios.ECHO
else:
attr[3] = attr[3] & ~termios.ECHO
# I tried TCSADRAIN and TCSAFLUSH, but
# these were inconsistent and blocked on some platforms.
# TCSADRAIN would probably be ideal if it worked.
termios.tcsetattr(self.child_fd, termios.TCSANOW, attr)
def read_nonblocking(self, size=1, timeout=-1):
"""This reads at most size characters from the child application. It
includes a timeout. If the read does not complete within the timeout
period then a TIMEOUT exception is raised. If the end of file is read
then an EOF exception will be raised. If a log file was set using
setlog() then all data will also be written to the log file.
If timeout is None then the read may block indefinitely.
If timeout is -1 then the self.timeout value is used. If timeout is 0
then the child is polled and if there is no data immediately ready
then this will raise a TIMEOUT exception.
The timeout refers only to the amount of time to read at least one
character. This is not affected by the 'size' parameter, so if you call
read_nonblocking(size=100, timeout=30) and only one character is
available right away then one character will be returned immediately.
It will not wait for 30 seconds for another 99 characters to come in.
This is a wrapper around os.read(). It uses select.select() to
implement the timeout. """
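# Illustrative usage (assumes a spawned child named 'child'): a
# non-blocking poll that treats "no data yet" as an empty read.
#   try:
#       data = child.read_nonblocking(size=1024, timeout=0)
#   except TIMEOUT:
#       data = ''  # nothing was immediately available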
if self.closed:
raise ValueError('I/O operation on closed file.')
if timeout == -1:
timeout = self.timeout
# Note that some systems such as Solaris do not give an EOF when
# the child dies. In fact, you can still try to read
# from the child_fd -- it will block forever or until TIMEOUT.
# For this case, I test isalive() before doing any reading.
# If isalive() is false, then I pretend that this is the same as EOF.
if not self.isalive():
# timeout of 0 means "poll"
r, w, e = self.__select([self.child_fd], [], [], 0)
if not r:
self.flag_eof = True
raise EOF('End Of File (EOF). Braindead platform.')
elif self.__irix_hack:
# Irix takes a long time before it realizes a child was terminated.
# FIXME So does this mean Irix systems are forced to always have
# FIXME a 2 second delay when calling read_nonblocking? That sucks.
r, w, e = self.__select([self.child_fd], [], [], 2)
if not r and not self.isalive():
self.flag_eof = True
raise EOF('End Of File (EOF). Slow platform.')
r, w, e = self.__select([self.child_fd], [], [], timeout)
if not r:
if not self.isalive():
# Some platforms, such as Irix, will claim that their
# processes are alive; timeout on the select; and
# then finally admit that they are not alive.
self.flag_eof = True
raise EOF('End of File (EOF). Very slow platform.')
else:
raise TIMEOUT('Timeout exceeded.')
if self.child_fd in r:
try:
s = os.read(self.child_fd, size)
except OSError as e:
# Linux does this
self.flag_eof = True
raise EOF('End Of File (EOF). Exception style platform.')
if s == '':
# BSD style
self.flag_eof = True
raise EOF('End Of File (EOF). Empty string style platform.')
if self.logfile is not None:
self.logfile.write(s)
self.logfile.flush()
if self.logfile_read is not None:
self.logfile_read.write(s)
self.logfile_read.flush()
return s
raise ExceptionPexpect('Reached an unexpected state.')
def read(self, size=-1):
"""This reads at most "size" bytes from the file (less if the read hits
EOF before obtaining size bytes). If the size argument is negative or
omitted, read all data until EOF is reached. The bytes are returned as
a string object. An empty string is returned when EOF is encountered
immediately. """
if size == 0:
return ''
if size < 0:
# delimiter default is EOF
self.expect(self.delimiter)
return self.before
# I could have done this more directly by not using expect(), but
# I deliberately decided to couple read() to expect() so that
# I would catch any bugs early and ensure consistent behavior.
# It's a little less efficient, but there is less for me to
# worry about if I have to later modify read() or expect().
# Note, it's OK if size==-1 in the regex. That just means it
# will never match anything in which case we stop only on EOF.
cre = re.compile('.{%d}' % size, re.DOTALL)
# delimiter default is EOF
index = self.expect([cre, self.delimiter])
if index == 0:
### FIXME self.before should be ''. Should I assert this?
return self.after
return self.before
def readline(self, size=-1):
"""This reads and returns one entire line. The newline at the end of
line is returned as part of the string, unless the file ends without a
newline. An empty string is returned if EOF is encountered immediately.
This looks for a newline as a CR/LF pair (\\r\\n) even on UNIX because
this is what the pseudotty device returns. So contrary to what you may
expect you will receive newlines as \\r\\n.
If the size argument is 0 then an empty string is returned. In all
other cases the size argument is ignored, which is not standard
behavior for a file-like object. """
if size == 0:
return ''
# delimiter default is EOF
index = self.expect(['\r\n', self.delimiter])
if index == 0:
return self.before + '\r\n'
else:
return self.before
def __iter__(self):
"""This is to support iterators over a file-like object.
"""
return self
def __next__(self):
"""This is to support iterators over a file-like object.
"""
result = self.readline()
if result == "":
raise StopIteration
return result
def readlines(self, sizehint=-1):
"""This reads until EOF using readline() and returns a list containing
the lines thus read. The optional 'sizehint' argument is ignored. """
lines = []
while True:
line = self.readline()
if not line:
break
lines.append(line)
return lines
def write(self, s):
"""This is similar to send() except that there is no return value.
"""
self.send(s)
def writelines(self, sequence):
"""This calls write() for each element in the sequence. The sequence
can be any iterable object producing strings, typically a list of
strings. This does not add line separators. There is no return value.
"""
for s in sequence:
self.write(s)
def send(self, s):
"""This sends a string to the child process. This returns the number of
bytes written. If a log file was set then the data is also written to
the log. """
time.sleep(self.delaybeforesend)
if self.logfile is not None:
self.logfile.write(s)
self.logfile.flush()
if self.logfile_send is not None:
self.logfile_send.write(s)
self.logfile_send.flush()
c = os.write(self.child_fd, s.encode("utf-8"))
return c
def sendline(self, s=''):
"""This is like send(), but it adds a linefeed (os.linesep). This
returns the number of bytes written. """
n = self.send(s)
n = n + self.send(os.linesep)
return n
def sendcontrol(self, char):
"""This sends a control character to the child such as Ctrl-C or
Ctrl-D. For example, to send a Ctrl-G (ASCII 7)::
child.sendcontrol('g')
See also, sendintr() and sendeof().
"""
char = char.lower()
a = ord(char)
if a >= 97 and a <= 122:
a = a - ord('a') + 1
return self.send(chr(a))
d = {'@': 0, '`': 0,
'[': 27, '{': 27,
'\\': 28, '|': 28,
']': 29, '}': 29,
'^': 30, '~': 30,
'_': 31,
'?': 127}
if char not in d:
return 0
return self.send(chr(d[char]))
def sendeof(self):
"""This sends an EOF to the child. This sends a character which causes
the pending parent output buffer to be sent to the waiting child
program without waiting for end-of-line. If it is the first character
of the line, the read() in the user program returns 0, which signifies
end-of-file. This means that, to work as expected, sendeof() has to be
called at the beginning of a line. This method does not send a newline.
It is the responsibility of the caller to ensure the eof is sent at the
beginning of a line. """
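# Illustrative (hypothetical session): end input to a 'cat' child.
#   child = pexpect.spawn('cat')
#   child.sendline('hello')
#   child.sendeof()  # cat reads EOF at the start of a line and exits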
### Hmmm... how do I send an EOF?
###C if ((m = write(pty, *buf, p - *buf)) < 0)
###C return (errno == EWOULDBLOCK) ? n : -1;
#fd = sys.stdin.fileno()
#old = termios.tcgetattr(fd) # remember current state
#attr = termios.tcgetattr(fd)
#attr[3] = attr[3] | termios.ICANON # ICANON must be set to see EOF
#try: # use try/finally to ensure state gets restored
# termios.tcsetattr(fd, termios.TCSADRAIN, attr)
# if hasattr(termios, 'CEOF'):
# os.write(self.child_fd, '%c' % termios.CEOF)
# else:
# # Silly platform does not define CEOF so assume CTRL-D
# os.write(self.child_fd, '%c' % 4)
#finally: # restore state
# termios.tcsetattr(fd, termios.TCSADRAIN, old)
if hasattr(termios, 'VEOF'):
char = termios.tcgetattr(self.child_fd)[6][termios.VEOF]
else:
# platform does not define VEOF so assume CTRL-D
char = chr(4)
self.send(char)
def sendintr(self):
"""This sends a SIGINT to the child. It does not require
the SIGINT to be the first character on a line. """
if hasattr(termios, 'VINTR'):
char = termios.tcgetattr(self.child_fd)[6][termios.VINTR]
else:
# platform does not define VINTR so assume CTRL-C
char = chr(3)
self.send(char)
def eof(self):
"""This returns True if the EOF exception was ever raised.
"""
return self.flag_eof
def terminate(self, force=False):
"""This forces a child process to terminate. It starts nicely with
SIGHUP and SIGINT. If "force" is True then moves onto SIGKILL. This
returns True if the child was terminated. This returns False if the
child could not be terminated. """
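# Illustrative usage (hypothetical handling): escalate to SIGKILL if
# the child ignores the gentler signals.
#   if not child.terminate(force=True):
#       raise RuntimeError('could not terminate the child')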
if not self.isalive():
return True
try:
self.kill(signal.SIGHUP)
time.sleep(self.delayafterterminate)
if not self.isalive():
return True
self.kill(signal.SIGCONT)
time.sleep(self.delayafterterminate)
if not self.isalive():
return True
self.kill(signal.SIGINT)
time.sleep(self.delayafterterminate)
if not self.isalive():
return True
if force:
self.kill(signal.SIGKILL)
time.sleep(self.delayafterterminate)
if not self.isalive():
return True
else:
return False
return False
except OSError as e:
# I think there are kernel timing issues that sometimes cause
# this to happen. I think isalive() reports True, but the
# process is dead to the kernel.
# Make one last attempt to see if the kernel is up to date.
time.sleep(self.delayafterterminate)
if not self.isalive():
return True
else:
return False
def wait(self):
"""This waits until the child exits. This is a blocking call. This will
not read any data from the child, so this will block forever if the
child has unread output and has terminated. In other words, the child
may have printed output then called exit(), but, the child is
technically still alive until its output is read by the parent. """
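# Illustrative pattern (assumes a spawned child): drain the output
# first so that wait() cannot block on unread data.
#   child.expect(pexpect.EOF)
#   status = child.wait()  # exit code if the child exited normally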
if self.isalive():
pid, status = os.waitpid(self.pid, 0)
else:
raise ExceptionPexpect('Cannot wait for dead child process.')
if os.WIFEXITED(status):
self.status = status
self.exitstatus = os.WEXITSTATUS(status)
self.signalstatus = None
self.terminated = True
elif os.WIFSIGNALED(status):
self.status = status
self.exitstatus = None
self.signalstatus = os.WTERMSIG(status)
self.terminated = True
elif os.WIFSTOPPED(status):
# You can't call wait() on a child process in the stopped state.
raise ExceptionPexpect('Called wait() on a stopped child ' +
'process. This is not supported. Is some other ' +
'process attempting job control with our child pid?')
return self.exitstatus
def isalive(self):
"""This tests if the child process is running or not. This is
non-blocking. If the child was terminated then this will read the
exitstatus or signalstatus of the child. This returns True if the child
process appears to be running or False if not. It can take literally
SECONDS for Solaris to return the right status. """
if self.terminated:
return False
if self.flag_eof:
# This is for Linux, which requires the blocking form
# of waitpid to get status of a defunct process.
# This is super-lame. The flag_eof would have been set
# in read_nonblocking(), so this should be safe.
waitpid_options = 0
else:
waitpid_options = os.WNOHANG
try:
pid, status = os.waitpid(self.pid, waitpid_options)
except OSError as e:
# No child processes
if e.errno == errno.ECHILD:
raise ExceptionPexpect('isalive() encountered condition ' +
'where "terminated" is 0, but there was no child ' +
'process. Did someone else call waitpid() ' +
'on our process?')
else:
raise e
# I have to do this twice for Solaris.
# I can't even believe that I figured this out...
# If waitpid() returns 0 it means that no child process
# wishes to report, and the value of status is undefined.
if pid == 0:
try:
### os.WNOHANG) # Solaris!
pid, status = os.waitpid(self.pid, waitpid_options)
except OSError as e:
# This should never happen...
if e.errno == errno.ECHILD:
raise ExceptionPexpect('isalive() encountered condition ' +
'that should never happen. There was no child ' +
'process. Did someone else call waitpid() ' +
'on our process?')
else:
raise e
# If pid is still 0 after two calls to waitpid() then the process
# really is alive. This seems to work on all platforms, except for
# Irix which seems to require a blocking call on waitpid or select,
# so I let read_nonblocking take care of this situation
# (unfortunately, this requires waiting through the timeout).
if pid == 0:
    return True
if os.WIFEXITED(status):
self.status = status
self.exitstatus = os.WEXITSTATUS(status)
self.signalstatus = None
self.terminated = True
elif os.WIFSIGNALED(status):
self.status = status
self.exitstatus = None
self.signalstatus = os.WTERMSIG(status)
self.terminated = True
elif os.WIFSTOPPED(status):
raise ExceptionPexpect('isalive() encountered condition ' +
'where child process is stopped. This is not ' +
'supported. Is some other process attempting ' +
'job control with our child pid?')
return False
def kill(self, sig):
"""This sends the given signal to the child application. In keeping
with UNIX tradition it has a misleading name. It does not necessarily
kill the child unless you send the right signal. """
# Same as os.kill, but the pid is given for you.
if self.isalive():
os.kill(self.pid, sig)
def compile_pattern_list(self, patterns):
"""This compiles a pattern-string or a list of pattern-strings.
Patterns must be a StringType, EOF, TIMEOUT, SRE_Pattern, or a list of
those. Patterns may also be None which results in an empty list (you
might do this if waiting for an EOF or TIMEOUT condition without
expecting any pattern).
This is used by expect() when calling expect_list(). Thus expect() is
nothing more than::
cpl = self.compile_pattern_list(pl)
return self.expect_list(cpl, timeout)
If you are using expect() within a loop it may be more
efficient to compile the patterns first and then call expect_list().
This avoid calls in a loop to compile_pattern_list()::
cpl = self.compile_pattern_list(my_pattern)
while some_condition:
...
i = self.expect_list(cpl, timeout)
...
"""
if patterns is None:
return []
if not isinstance(patterns, list):
patterns = [patterns]
# Allow dot to match \n
compile_flags = re.DOTALL
if self.ignorecase:
compile_flags = compile_flags | re.IGNORECASE
compiled_pattern_list = []
for p in patterns:
if isinstance(p, str):
compiled_pattern_list.append(re.compile(p, compile_flags))
elif p is EOF:
compiled_pattern_list.append(EOF)
elif p is TIMEOUT:
compiled_pattern_list.append(TIMEOUT)
elif isinstance(p, type(re.compile(''))):
compiled_pattern_list.append(p)
else:
raise TypeError('Argument must be one of StringTypes, ' +
'EOF, TIMEOUT, SRE_Pattern, or a list of those ' +
'type. %s' % str(type(p)))
return compiled_pattern_list
def expect(self, pattern, timeout=-1, searchwindowsize=-1):
"""This seeks through the stream until a pattern is matched. The
pattern is overloaded and may take several types. The pattern can be a
StringType, EOF, a compiled re, or a list of any of those types.
Strings will be compiled to re types. This returns the index into the
pattern list. If the pattern was not a list this returns index 0 on a
successful match. This may raise exceptions for EOF or TIMEOUT. To
avoid the EOF or TIMEOUT exceptions add EOF or TIMEOUT to the pattern
list. That will cause expect to match an EOF or TIMEOUT condition
instead of raising an exception.
If you pass a list of patterns and more than one matches, the first
match in the stream is chosen. If more than one pattern matches at that
point, the leftmost in the pattern list is chosen. For example::
# the input is 'foobar'
index = p.expect(['bar', 'foo', 'foobar'])
# returns 1('foo') even though 'foobar' is a "better" match
Please note, however, that buffering can affect this behavior, since
input arrives in unpredictable chunks. For example::
# the input is 'foobar'
index = p.expect(['foobar', 'foo'])
# returns 0('foobar') if all input is available at once,
# but returns 1('foo') if parts of the final 'bar' arrive late
After a match is found the instance attributes 'before', 'after' and
'match' will be set. You can see all the data read before the match in
'before'. You can see the data that was matched in 'after'. The
re.MatchObject used in the re match will be in 'match'. If an error
occurred then 'before' will be set to all the data read so far and
'after' and 'match' will be None.
If timeout is -1 then timeout will be set to the self.timeout value.
A list entry may be EOF or TIMEOUT instead of a string. This will
catch these exceptions and return the index of the list entry instead
of raising the exception. The attribute 'after' will be set to the
exception type. The attribute 'match' will be None. This allows you to
write code like this::
index = p.expect(['good', 'bad', pexpect.EOF, pexpect.TIMEOUT])
if index == 0:
do_something()
elif index == 1:
do_something_else()
elif index == 2:
do_some_other_thing()
elif index == 3:
do_something_completely_different()
instead of code like this::
try:
index = p.expect(['good', 'bad'])
if index == 0:
do_something()
elif index == 1:
do_something_else()
except EOF:
do_some_other_thing()
except TIMEOUT:
do_something_completely_different()
These two forms are equivalent. It all depends on what you want. You
can also just expect the EOF if you are waiting for all output of a
child to finish. For example::
p = pexpect.spawn('/bin/ls')
p.expect(pexpect.EOF)
print(p.before)
If you are trying to optimize for speed then see expect_list().
"""
compiled_pattern_list = self.compile_pattern_list(pattern)
return self.expect_list(compiled_pattern_list,
timeout, searchwindowsize)
def expect_list(self, pattern_list, timeout=-1, searchwindowsize=-1):
"""This takes a list of compiled regular expressions and returns the
index into the pattern_list that matched the child output. The list may
also contain EOF or TIMEOUT(which are not compiled regular
expressions). This method is similar to the expect() method except that
expect_list() does not recompile the pattern list on every call. This
may help if you are trying to optimize for speed, otherwise just use
the expect() method. This is called by expect(). If timeout==-1 then
the self.timeout value is used. If searchwindowsize==-1 then the
self.searchwindowsize value is used. """
return self.expect_loop(searcher_re(pattern_list),
timeout, searchwindowsize)
def expect_exact(self, pattern_list, timeout=-1, searchwindowsize=-1):
"""This is similar to expect(), but uses plain string matching instead
of compiled regular expressions in 'pattern_list'. The 'pattern_list'
may be a string; a list or other sequence of strings; or TIMEOUT and
EOF.
This call might be faster than expect() for two reasons: string
searching is faster than RE matching and it is possible to limit the
search to just the end of the input buffer.
This method is also useful when you don't want to have to worry about
escaping regular expression characters that you want to match."""
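# Illustrative: match a literal '$ ' prompt without regex escaping.
#   child.expect_exact('$ ')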
if (isinstance(pattern_list, str) or
pattern_list in (TIMEOUT, EOF)):
pattern_list = [pattern_list]
return self.expect_loop(searcher_string(pattern_list),
timeout, searchwindowsize)
def expect_loop(self, searcher, timeout=-1, searchwindowsize=-1):
"""This is the common loop used inside expect. The 'searcher' should be
an instance of searcher_re or searcher_string, which describes how and
what to search for in the input.
See expect() for other arguments, return value and exceptions. """
self.searcher = searcher
if timeout == -1:
timeout = self.timeout
if timeout is not None:
end_time = time.time() + timeout
if searchwindowsize == -1:
searchwindowsize = self.searchwindowsize
try:
incoming = self.buffer
freshlen = len(incoming)
while True:
# Keep reading until exception or return.
index = searcher.search(incoming, freshlen, searchwindowsize)
if index >= 0:
self.buffer = incoming[searcher.end:]
self.before = incoming[: searcher.start]
self.after = incoming[searcher.start: searcher.end]
self.match = searcher.match
self.match_index = index
return self.match_index
# No match at this point
if timeout is not None and timeout < 0:
raise TIMEOUT('Timeout exceeded in expect_any().')
# Still have time left, so read more data
c = self.read_nonblocking(self.maxread, timeout)
freshlen = len(c)
time.sleep(0.0001)
incoming = incoming + c
if timeout is not None:
timeout = end_time - time.time()
except EOF as e:
self.buffer = ''
self.before = incoming
self.after = EOF
index = searcher.eof_index
if index >= 0:
self.match = EOF
self.match_index = index
return self.match_index
else:
self.match = None
self.match_index = None
raise EOF(str(e) + '\n' + str(self))
except TIMEOUT as e:
self.buffer = incoming
self.before = incoming
self.after = TIMEOUT
index = searcher.timeout_index
if index >= 0:
self.match = TIMEOUT
self.match_index = index
return self.match_index
else:
self.match = None
self.match_index = None
raise TIMEOUT(str(e) + '\n' + str(self))
except:
self.before = incoming
self.after = None
self.match = None
self.match_index = None
raise
def getwinsize(self):
"""This returns the terminal window size of the child tty. The return
value is a tuple of (rows, cols). """
TIOCGWINSZ = getattr(termios, 'TIOCGWINSZ', 1074295912)
s = struct.pack('HHHH', 0, 0, 0, 0)
x = fcntl.ioctl(self.fileno(), TIOCGWINSZ, s)
return struct.unpack('HHHH', x)[0:2]
def setwinsize(self, rows, cols):
"""This sets the terminal window size of the child tty. This will cause
a SIGWINCH signal to be sent to the child. This does not change the
physical window size. It changes the size reported to TTY-aware
applications like vi or curses -- applications that respond to the
SIGWINCH signal. """
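# Illustrative: tell the child it now has a 40-row, 100-column
# terminal (this sends SIGWINCH to the child).
#   child.setwinsize(40, 100)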
# Check for buggy platforms. Some Python versions on some platforms
# (notably OSF1 Alpha and RedHat 7.1) truncate the value for
# termios.TIOCSWINSZ. It is not clear why this happens.
# These platforms don't seem to handle the signed int very well;
# yet other platforms like OpenBSD have a large negative value for
# TIOCSWINSZ and they don't have a truncate problem.
# Newer versions of Linux have totally different values for TIOCSWINSZ.
# Note that this fix is a hack.
TIOCSWINSZ = getattr(termios, 'TIOCSWINSZ', -2146929561)
if TIOCSWINSZ == 2148037735:
# Same bits, but with sign.
TIOCSWINSZ = -2146929561
# Note, assume ws_xpixel and ws_ypixel are zero.
s = struct.pack('HHHH', rows, cols, 0, 0)
fcntl.ioctl(self.fileno(), TIOCSWINSZ, s)
def interact(self, escape_character=chr(29),
input_filter=None, output_filter=None):
"""This gives control of the child process to the interactive user (the
human at the keyboard). Keystrokes are sent to the child process, and
the stdout and stderr output of the child process is printed. This
simply echos the child stdout and child stderr to the real stdout and
it echos the real stdin to the child stdin. When the user types the
escape_character this method will stop. The default for
escape_character is ^]. This should not be confused with ASCII 27 --
the ESC character. ASCII 29 was chosen for historical merit because
this is the character used by 'telnet' as the escape character. The
escape_character will not be sent to the child process.
You may pass in optional input and output filter functions. These
functions should take a string and return a string. The output_filter
will be passed all the output from the child process. The input_filter
will be passed all the keyboard input from the user. The input_filter
is run BEFORE the check for the escape_character.
Note that if you change the window size of the parent the SIGWINCH
signal will not be passed through to the child. If you want the child
window size to change when the parent's window size changes then do
something like the following example::
import pexpect, struct, fcntl, termios, signal, sys
def sigwinch_passthrough (sig, data):
s = struct.pack("HHHH", 0, 0, 0, 0)
a = struct.unpack('hhhh', fcntl.ioctl(sys.stdout.fileno(),
termios.TIOCGWINSZ , s))
global p
p.setwinsize(a[0],a[1])
# Note that 'p' is global and is used in sigwinch_passthrough.
p = pexpect.spawn('/bin/bash')
signal.signal(signal.SIGWINCH, sigwinch_passthrough)
p.interact()
"""
# Flush the buffer.
self.stdout.write(self.buffer)
self.stdout.flush()
self.buffer = ''
mode = tty.tcgetattr(self.STDIN_FILENO)
tty.setraw(self.STDIN_FILENO)
try:
self.__interact_copy(escape_character, input_filter, output_filter)
finally:
tty.tcsetattr(self.STDIN_FILENO, tty.TCSAFLUSH, mode)
def __interact_writen(self, fd, data):
"""This is used by the interact() method.
"""
while data != '' and self.isalive():
n = os.write(fd, data)
data = data[n:]
def __interact_read(self, fd):
"""This is used by the interact() method.
"""
return os.read(fd, 1000)
def __interact_copy(self, escape_character=None,
input_filter=None, output_filter=None):
"""This is used by the interact() method.
"""
while self.isalive():
r, w, e = self.__select([self.child_fd, self.STDIN_FILENO], [], [])
if self.child_fd in r:
data = self.__interact_read(self.child_fd)
if output_filter:
data = output_filter(data)
if self.logfile is not None:
self.logfile.write(data)
self.logfile.flush()
os.write(self.STDOUT_FILENO, data)
if self.STDIN_FILENO in r:
data = self.__interact_read(self.STDIN_FILENO)
if input_filter:
data = input_filter(data)
i = data.rfind(escape_character)
if i != -1:
data = data[:i]
self.__interact_writen(self.child_fd, data)
break
self.__interact_writen(self.child_fd, data)
def __select(self, iwtd, owtd, ewtd, timeout=None):
"""This is a wrapper around select.select() that ignores signals. If
select.select raises a select.error exception and errno is an EINTR
error then it is ignored. Mainly this is used to ignore sigwinch
(terminal resize). """
# if select() is interrupted by a signal (errno==EINTR) then
# we loop back and enter the select() again.
if timeout is not None:
end_time = time.time() + timeout
while True:
try:
return select.select(iwtd, owtd, ewtd, timeout)
except select.error as e:
if e.args[0] == errno.EINTR:
# if we loop back we have to subtract the
# amount of time we already waited.
if timeout is not None:
timeout = end_time - time.time()
if timeout < 0:
return([], [], [])
else:
# something else caused the select.error, so
# this actually is an exception.
raise
##############################################################################
# The following methods are no longer supported or allowed.
def setmaxread(self, maxread):
"""This method is no longer supported or allowed. I don't like getters
and setters without a good reason. """
raise ExceptionPexpect('This method is no longer supported ' +
'or allowed. Just assign a value to the ' +
'maxread member variable.')
def setlog(self, fileobject):
"""This method is no longer supported or allowed.
"""
raise ExceptionPexpect('This method is no longer supported ' +
'or allowed. Just assign a value to the logfile ' +
'member variable.')
##############################################################################
# End of spawn class
##############################################################################
class searcher_string(object):
"""This is a plain string search helper for the spawn.expect_any() method.
This helper class is for speed. For more powerful regex patterns
see the helper class, searcher_re.
Attributes:
eof_index - index of EOF, or -1
timeout_index - index of TIMEOUT, or -1
After a successful match by the search() method the following attributes
are available:
start - index into the buffer, first byte of match
end - index into the buffer, first byte after match
match - the matching string itself
"""
def __init__(self, strings):
"""This creates an instance of searcher_string. This argument 'strings'
may be a list; a sequence of strings; or the EOF or TIMEOUT types. """
self.eof_index = -1
self.timeout_index = -1
self._strings = []
for n, s in enumerate(strings):
if s is EOF:
self.eof_index = n
continue
if s is TIMEOUT:
self.timeout_index = n
continue
self._strings.append((n, s))
def __str__(self):
"""This returns a human-readable string that represents the state of
the object."""
ss = [(ns[0], ' %d: "%s"' % ns) for ns in self._strings]
ss.append((-1, 'searcher_string:'))
if self.eof_index >= 0:
ss.append((self.eof_index, ' %d: EOF' % self.eof_index))
if self.timeout_index >= 0:
ss.append((self.timeout_index,
' %d: TIMEOUT' % self.timeout_index))
ss.sort()
ss = list(zip(*ss))[1]
return '\n'.join(ss)
def search(self, buffer, freshlen, searchwindowsize=None):
"""This searches 'buffer' for the first occurence of one of the search
strings. 'freshlen' must indicate the number of bytes at the end of
'buffer' which have not been searched before. It helps to avoid
searching the same, possibly big, buffer over and over again.
See class spawn for the 'searchwindowsize' argument.
If there is a match this returns the index of that string, and sets
'start', 'end' and 'match'. Otherwise, this returns -1. """
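# Illustrative call with hypothetical values: if only the last 5
# bytes of 'hello world' are fresh, a search for 'world' still hits.
#   s = searcher_string(['world'])
#   s.search('hello world', freshlen=5)  # returns 0; s.start == 6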
absurd_match = len(buffer)
first_match = absurd_match
# 'freshlen' helps a lot here. Further optimizations could
# possibly include:
#
# using something like the Boyer-Moore Fast String Searching
# Algorithm; pre-compiling the search through a list of
# strings into something that can scan the input once to
# search for all N strings; realize that if we search for
# ['bar', 'baz'] and the input is '...foo' we need not bother
# rescanning until we've read three more bytes.
#
# Sadly, I don't know enough about this interesting topic. /grahn
for index, s in self._strings:
if searchwindowsize is None:
# the match, if any, can only be in the fresh data,
# or at the very end of the old data
offset = -(freshlen + len(s))
else:
# better obey searchwindowsize
offset = -searchwindowsize
n = buffer.find(s, offset)
if n >= 0 and n < first_match:
first_match = n
best_index, best_match = index, s
if first_match == absurd_match:
return -1
self.match = best_match
self.start = first_match
self.end = self.start + len(self.match)
return best_index
class searcher_re(object):
"""This is regular expression string search helper for the
spawn.expect_any() method. This helper class is for powerful
pattern matching. For speed, see the helper class, searcher_string.
Attributes:
eof_index - index of EOF, or -1
timeout_index - index of TIMEOUT, or -1
After a successful match by the search() method the following attributes
are available:
start - index into the buffer, first byte of match
end - index into the buffer, first byte after match
match - the re.match object returned by a successful re.search
"""
def __init__(self, patterns):
"""This creates an instance that searches for 'patterns' Where
'patterns' may be a list or other sequence of compiled regular
expressions, or the EOF or TIMEOUT types."""
self.eof_index = -1
self.timeout_index = -1
self._searches = []
for n, s in enumerate(patterns):
if s is EOF:
self.eof_index = n
continue
if s is TIMEOUT:
self.timeout_index = n
continue
self._searches.append((n, s))
def __str__(self):
"""This returns a human-readable string that represents the state of
the object."""
ss = [(n, ' %d: re.compile("%s")' %
(n, str(s.pattern))) for n, s in self._searches]
ss.append((-1, 'searcher_re:'))
if self.eof_index >= 0:
ss.append((self.eof_index, ' %d: EOF' % self.eof_index))
if self.timeout_index >= 0:
ss.append((self.timeout_index, ' %d: TIMEOUT' %
self.timeout_index))
ss.sort()
ss = list(zip(*ss))[1]
return '\n'.join(ss)
def search(self, buffer, freshlen, searchwindowsize=None):
"""This searches 'buffer' for the first occurence of one of the regular
expressions. 'freshlen' must indicate the number of bytes at the end of
'buffer' which have not been searched before.
See class spawn for the 'searchwindowsize' argument.
If there is a match this returns the index of that string, and sets
'start', 'end' and 'match'. Otherwise, returns -1."""
absurd_match = len(buffer)
first_match = absurd_match
# 'freshlen' doesn't help here -- we cannot predict the
# length of a match, and the re module provides no help.
if searchwindowsize is None:
searchstart = 0
else:
searchstart = max(0, len(buffer) - searchwindowsize)
for index, s in self._searches:
match = s.search(buffer, searchstart)
if match is None:
continue
n = match.start()
if n < first_match:
first_match = n
the_match = match
best_index = index
if first_match == absurd_match:
return -1
self.start = first_match
self.match = the_match
self.end = self.match.end()
return best_index
def which(filename):
"""This takes a given filename; tries to find it in the environment path;
then checks if it is executable. This returns the full path to the filename
if found and executable. Otherwise this returns None."""
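# Illustrative: which('ls') typically returns something like
# '/bin/ls' on Linux, or None if 'ls' is not on the PATH.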
# Special case where filename contains an explicit path.
if os.path.dirname(filename) != '':
if os.access(filename, os.X_OK):
return filename
if 'PATH' not in os.environ or os.environ['PATH'] == '':
p = os.defpath
else:
p = os.environ['PATH']
pathlist = p.split(os.pathsep)
for path in pathlist:
ff = os.path.join(path, filename)
if os.access(ff, os.X_OK):
return ff
return None
def split_command_line(command_line):
"""This splits a command line into a list of arguments. It splits arguments
on spaces, but handles embedded quotes, doublequotes, and escaped
characters. It's impossible to do this with a regular expression, so I
wrote a little state machine to parse the command line. """
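# Illustrative: quotes and escapes are honored, e.g.
#   split_command_line('grep -e "a b" file.txt')
#   # -> ['grep', '-e', 'a b', 'file.txt']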
arg_list = []
arg = ''
# Constants to name the states we can be in.
state_basic = 0
state_esc = 1
state_singlequote = 2
state_doublequote = 3
# The state when consuming whitespace between commands.
state_whitespace = 4
state = state_basic
for c in command_line:
if state == state_basic or state == state_whitespace:
if c == '\\':
# Escape the next character
state = state_esc
elif c == r"'":
# Handle single quote
state = state_singlequote
elif c == r'"':
# Handle double quote
state = state_doublequote
elif c.isspace():
# Add arg to arg_list if we aren't in the middle of whitespace.
if state == state_whitespace:
# Do nothing.
pass
else:
arg_list.append(arg)
arg = ''
state = state_whitespace
else:
arg = arg + c
state = state_basic
elif state == state_esc:
arg = arg + c
state = state_basic
elif state == state_singlequote:
if c == r"'":
state = state_basic
else:
arg = arg + c
elif state == state_doublequote:
if c == r'"':
state = state_basic
else:
arg = arg + c
if arg != '':
arg_list.append(arg)
return arg_list
# vi:set sr et ts=4 sw=4 ft=python :
| bsd-3-clause |
CenterForOpenScience/osf.io | api_tests/nodes/serializers/test_serializers.py | 5 | 9672 | from dateutil.parser import parse as parse_date
import pytest
from future.moves.urllib.parse import urlparse
from api.base.settings.defaults import API_BASE
from api.nodes.serializers import NodeSerializer
from api.sparse.serializers import SparseNodeSerializer, SparseRegistrationSerializer
from api.registrations.serializers import RegistrationSerializer
from framework.auth import Auth
from osf_tests.factories import (
AuthUserFactory,
UserFactory,
NodeFactory,
RegistrationFactory,
ProjectFactory
)
from tests.base import assert_datetime_equal
from tests.utils import make_drf_request_with_version
@pytest.fixture()
def user():
return AuthUserFactory()
@pytest.mark.django_db
class TestNodeSerializer:
def test_node_serializer(self, user):
# test_node_serialization
parent = ProjectFactory(creator=user)
node = NodeFactory(creator=user, parent=parent)
req = make_drf_request_with_version(version='2.0')
result = NodeSerializer(node, context={'request': req}).data
data = result['data']
assert data['id'] == node._id
assert data['type'] == 'nodes'
# Attributes
attributes = data['attributes']
assert attributes['title'] == node.title
assert attributes['description'] == node.description
assert attributes['public'] == node.is_public
assert set(attributes['tags']) == set(node.tags.values_list('name', flat=True))
assert not attributes['current_user_can_comment']
assert attributes['category'] == node.category
assert attributes['registration'] == node.is_registration
assert attributes['fork'] == node.is_fork
assert attributes['collection'] == node.is_collection
assert attributes['analytics_key'] == node.keenio_read_key
assert attributes['wiki_enabled'] == node.has_addon('wiki')
# Relationships
relationships = data['relationships']
assert 'region' in relationships
assert 'children' in relationships
assert 'contributors' in relationships
assert 'files' in relationships
assert 'parent' in relationships
assert 'affiliated_institutions' in relationships
assert 'registrations' in relationships
assert 'forked_from' not in relationships
parent_link = relationships['parent']['links']['related']['href']
assert urlparse(
parent_link).path == '/{}nodes/{}/'.format(API_BASE, parent._id)
# test_fork_serialization
node = NodeFactory(creator=user)
fork = node.fork_node(auth=Auth(user))
req = make_drf_request_with_version(version='2.0')
result = NodeSerializer(fork, context={'request': req}).data
data = result['data']
# Relationships
relationships = data['relationships']
forked_from = relationships['forked_from']['links']['related']['href']
assert urlparse(
forked_from).path == '/{}nodes/{}/'.format(API_BASE, node._id)
# test_template_serialization
node = NodeFactory(creator=user)
fork = node.use_as_template(auth=Auth(user))
req = make_drf_request_with_version(version='2.0')
result = NodeSerializer(fork, context={'request': req}).data
data = result['data']
# Relationships
relationships = data['relationships']
templated_from = relationships['template_node']['links']['related']['href']
assert urlparse(
templated_from).path == '/{}nodes/{}/'.format(API_BASE, node._id)
@pytest.mark.django_db
class TestSparseNodeSerializer:
def test_sparse_node_serializer(self, user):
# test_node_serialization
parent = ProjectFactory(creator=user)
node = NodeFactory(creator=user, parent=parent)
req = make_drf_request_with_version(version='2.15')
result = SparseNodeSerializer(node, context={'request': req}).data
data = result['data']
assert data['id'] == node._id
assert data['type'] == 'sparse-nodes'
# Attributes
attributes = data['attributes']
assert attributes['title'] == node.title
assert attributes['description'] == node.description
assert attributes['public'] == node.is_public
assert set(attributes['tags']) == set(node.tags.values_list('name', flat=True))
assert 'current_user_can_comment' not in attributes
assert 'license' not in attributes
assert attributes['category'] == node.category
assert 'registration' not in attributes
assert attributes['fork'] == node.is_fork
# Relationships
relationships = data['relationships']
assert 'region' not in relationships
assert 'children' in relationships
assert 'detail' in relationships
assert 'contributors' in relationships
assert 'files' not in relationships
assert 'parent' in relationships
assert 'affiliated_institutions' not in relationships
assert 'registrations' not in relationships
assert 'forked_from' not in relationships
parent_link = relationships['parent']['links']['related']['href']
assert urlparse(parent_link).path == '/{}sparse/nodes/{}/'.format(API_BASE, parent._id)
assert 'sparse' not in relationships['detail']['links']['related']['href']
sparse_children_path = urlparse(relationships['children']['links']['related']['href']).path
assert sparse_children_path == '/{}sparse/nodes/{}/children/'.format(API_BASE, node._id)
@pytest.mark.django_db
class TestNodeRegistrationSerializer:
def test_serialization(self):
user = UserFactory()
versioned_request = make_drf_request_with_version(version='2.2')
registration = RegistrationFactory(creator=user)
result = RegistrationSerializer(
registration, context={
'request': versioned_request}).data
data = result['data']
assert data['id'] == registration._id
assert data['type'] == 'registrations'
should_not_relate_to_registrations = [
'registered_from',
'registered_by',
'registration_schema',
'region',
'provider',
'storage',
'groups',
]
# Attributes
attributes = data['attributes']
assert_datetime_equal(
parse_date(attributes['date_registered']),
registration.registered_date
)
assert attributes['withdrawn'] == registration.is_retracted
# Relationships
relationships = data['relationships']
# Relationships with data
relationship_urls = {
k: v['links']['related']['href'] for k, v
in relationships.items()}
assert 'registered_by' in relationships
registered_by = relationships['registered_by']['links']['related']['href']
assert urlparse(
registered_by).path == '/{}users/{}/'.format(API_BASE, user._id)
assert 'registered_from' in relationships
registered_from = relationships['registered_from']['links']['related']['href']
assert urlparse(registered_from).path == '/{}nodes/{}/'.format(
API_BASE, registration.registered_from._id)
api_registrations_url = '/{}registrations/'.format(API_BASE)
for relationship in relationship_urls:
if relationship in should_not_relate_to_registrations:
assert api_registrations_url not in relationship_urls[relationship]
else:
assert api_registrations_url in relationship_urls[relationship], 'For key {}'.format(
relationship)
@pytest.mark.django_db
class TestSparseRegistrationSerializer:
def test_sparse_registration_serializer(self, user):
user = UserFactory()
versioned_request = make_drf_request_with_version(version='2.2')
registration = RegistrationFactory(creator=user)
result = SparseRegistrationSerializer(
registration, context={
'request': versioned_request}).data
data = result['data']
assert data['id'] == registration._id
assert data['type'] == 'sparse-registrations'
# Attributes
attributes = data['attributes']
assert attributes['withdrawn'] == registration.is_retracted
assert attributes['title'] == registration.title
assert attributes['description'] == registration.description
assert attributes['public'] == registration.is_public
assert set(attributes['tags']) == set(registration.tags.values_list('name', flat=True))
assert 'current_user_can_comment' not in attributes
assert 'license' not in attributes
assert attributes['category'] == registration.category
assert attributes['fork'] == registration.is_fork
# Relationships
relationships = data['relationships']
assert 'registered_by' not in relationships
assert 'registered_from' not in relationships
assert 'region' not in relationships
assert 'children' in relationships
assert 'detail' in relationships
assert 'contributors' in relationships
assert 'files' not in relationships
assert 'affiliated_institutions' not in relationships
assert 'registrations' not in relationships
assert 'forked_from' not in relationships
assert 'sparse' not in relationships['detail']['links']['related']['href']
assert 'sparse' in relationships['children']['links']['related']['href']
| apache-2.0 |
HolgerPeters/scikit-learn | sklearn/cluster/spectral.py | 19 | 18536 | # -*- coding: utf-8 -*-
"""Algorithms for spectral clustering"""
# Author: Gael Varoquaux [email protected]
# Brian Cheung
# Wei LI <[email protected]>
# License: BSD 3 clause
import warnings
import numpy as np
from ..base import BaseEstimator, ClusterMixin
from ..utils import check_random_state, as_float_array
from ..utils.validation import check_array
from ..utils.extmath import norm
from ..metrics.pairwise import pairwise_kernels
from ..neighbors import kneighbors_graph
from ..manifold import spectral_embedding
from .k_means_ import k_means
def discretize(vectors, copy=True, max_svd_restarts=30, n_iter_max=20,
random_state=None):
"""Search for a partition matrix (clustering) which is closest to the
eigenvector embedding.
Parameters
----------
vectors : array-like, shape: (n_samples, n_clusters)
The embedding space of the samples.
copy : boolean, optional, default: True
Whether to copy vectors, or perform in-place normalization.
max_svd_restarts : int, optional, default: 30
Maximum number of attempts to restart SVD if convergence fails
n_iter_max : int, optional, default: 20
Maximum number of iterations to attempt in rotation and partition
matrix search if machine precision convergence is not reached
random_state : int seed, RandomState instance, or None (default)
A pseudo random number generator used for the initialization of the
rotation matrix.
Returns
-------
labels : array of integers, shape: n_samples
The labels of the clusters.
References
----------
- Multiclass spectral clustering, 2003
Stella X. Yu, Jianbo Shi
http://www1.icsi.berkeley.edu/~stellayu/publication/doc/2003kwayICCV.pdf
Notes
-----
The eigenvector embedding is used to iteratively search for the
closest discrete partition. First, the eigenvector embedding is
normalized to the space of partition matrices. An optimal discrete
partition matrix closest to this normalized embedding multiplied by
an initial rotation is calculated. Fixing this discrete partition
matrix, an optimal rotation matrix is calculated. These two
calculations are performed until convergence. The discrete partition
matrix is returned as the clustering solution. Used in spectral
clustering, this method tends to be faster and more robust to random
initialization than k-means.
"""
from scipy.sparse import csc_matrix
from scipy.linalg import LinAlgError
random_state = check_random_state(random_state)
vectors = as_float_array(vectors, copy=copy)
eps = np.finfo(float).eps
n_samples, n_components = vectors.shape
# Normalize the eigenvectors to an equal length of a vector of ones.
# Reorient the eigenvectors to point in the negative direction with respect
# to the first element. This may have to do with constraining the
# eigenvectors to lie in a specific quadrant to make the discretization
# search easier.
norm_ones = np.sqrt(n_samples)
for i in range(vectors.shape[1]):
vectors[:, i] = (vectors[:, i] / norm(vectors[:, i])) \
* norm_ones
if vectors[0, i] != 0:
vectors[:, i] = -1 * vectors[:, i] * np.sign(vectors[0, i])
# Normalize the rows of the eigenvectors. Samples should lie on the unit
# hypersphere centered at the origin. This transforms the samples in the
# embedding space to the space of partition matrices.
vectors = vectors / np.sqrt((vectors ** 2).sum(axis=1))[:, np.newaxis]
svd_restarts = 0
has_converged = False
# If there is an exception we try to randomize and rerun SVD again
# do this max_svd_restarts times.
while (svd_restarts < max_svd_restarts) and not has_converged:
# Initialize first column of rotation matrix with a row of the
# eigenvectors
rotation = np.zeros((n_components, n_components))
rotation[:, 0] = vectors[random_state.randint(n_samples), :].T
# To initialize the rest of the rotation matrix, find the rows
# of the eigenvectors that are as orthogonal to each other as
# possible
c = np.zeros(n_samples)
for j in range(1, n_components):
# Accumulate c to ensure row is as orthogonal as possible to
# previous picks as well as current one
c += np.abs(np.dot(vectors, rotation[:, j - 1]))
rotation[:, j] = vectors[c.argmin(), :].T
last_objective_value = 0.0
n_iter = 0
while not has_converged:
n_iter += 1
t_discrete = np.dot(vectors, rotation)
labels = t_discrete.argmax(axis=1)
vectors_discrete = csc_matrix(
(np.ones(len(labels)), (np.arange(0, n_samples), labels)),
shape=(n_samples, n_components))
t_svd = vectors_discrete.T * vectors
            try:
                U, S, Vh = np.linalg.svd(t_svd)
            except LinAlgError:
                svd_restarts += 1
                print("SVD did not converge, randomizing and trying again")
                break
ncut_value = 2.0 * (n_samples - S.sum())
if ((abs(ncut_value - last_objective_value) < eps) or
(n_iter > n_iter_max)):
has_converged = True
else:
# otherwise calculate rotation and continue
last_objective_value = ncut_value
rotation = np.dot(Vh.T, U.T)
if not has_converged:
raise LinAlgError('SVD did not converge')
return labels
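# A minimal doctest-style sketch (added for illustration; not part of the
# original module) of driving ``discretize`` on a toy 2-D embedding. The
# 8x2 array below is invented purely for the example.
#
#     >>> import numpy as np
#     >>> rng = np.random.RandomState(0)
#     >>> emb = np.vstack([rng.normal(0.0, 0.05, (4, 2)) + [1.0, 0.0],
#     ...                  rng.normal(0.0, 0.05, (4, 2)) + [0.0, 1.0]])
#     >>> labels = discretize(emb, random_state=rng)
#     >>> labels.shape[0]
#     8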
def spectral_clustering(affinity, n_clusters=8, n_components=None,
eigen_solver=None, random_state=None, n_init=10,
eigen_tol=0.0, assign_labels='kmeans'):
"""Apply clustering to a projection to the normalized laplacian.
In practice Spectral Clustering is very useful when the structure of
the individual clusters is highly non-convex or more generally when
a measure of the center and spread of the cluster is not a suitable
description of the complete cluster. For instance when clusters are
    nested circles on the 2D plane.
If affinity is the adjacency matrix of a graph, this method can be
used to find normalized graph cuts.
Read more in the :ref:`User Guide <spectral_clustering>`.
Parameters
    ----------
affinity : array-like or sparse matrix, shape: (n_samples, n_samples)
The affinity matrix describing the relationship of the samples to
embed. **Must be symmetric**.
Possible examples:
- adjacency matrix of a graph,
- heat kernel of the pairwise distance matrix of the samples,
- symmetric k-nearest neighbours connectivity matrix of the samples.
n_clusters : integer, optional
Number of clusters to extract.
n_components : integer, optional, default is n_clusters
Number of eigen vectors to use for the spectral embedding
eigen_solver : {None, 'arpack', 'lobpcg', or 'amg'}
The eigenvalue decomposition strategy to use. AMG requires pyamg
to be installed. It can be faster on very large, sparse problems,
but may also lead to instabilities
random_state : int seed, RandomState instance, or None (default)
A pseudo random number generator used for the initialization
of the lobpcg eigen vectors decomposition when eigen_solver == 'amg'
and by the K-Means initialization.
n_init : int, optional, default: 10
Number of time the k-means algorithm will be run with different
centroid seeds. The final results will be the best output of
n_init consecutive runs in terms of inertia.
eigen_tol : float, optional, default: 0.0
Stopping criterion for eigendecomposition of the Laplacian matrix
when using arpack eigen_solver.
assign_labels : {'kmeans', 'discretize'}, default: 'kmeans'
The strategy to use to assign labels in the embedding
space. There are two ways to assign labels after the laplacian
embedding. k-means can be applied and is a popular choice. But it can
also be sensitive to initialization. Discretization is another
approach which is less sensitive to random initialization. See
the 'Multiclass spectral clustering' paper referenced below for
more details on the discretization approach.
Returns
-------
labels : array of integers, shape: n_samples
The labels of the clusters.
References
----------
- Normalized cuts and image segmentation, 2000
Jianbo Shi, Jitendra Malik
http://citeseer.ist.psu.edu/viewdoc/summary?doi=10.1.1.160.2324
- A Tutorial on Spectral Clustering, 2007
Ulrike von Luxburg
http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.165.9323
- Multiclass spectral clustering, 2003
Stella X. Yu, Jianbo Shi
http://www1.icsi.berkeley.edu/~stellayu/publication/doc/2003kwayICCV.pdf
Notes
    -----
    The graph should contain only one connected component; otherwise
    the results make little sense.
This algorithm solves the normalized cut for k=2: it is a
normalized spectral clustering.
"""
if assign_labels not in ('kmeans', 'discretize'):
raise ValueError("The 'assign_labels' parameter should be "
"'kmeans' or 'discretize', but '%s' was given"
% assign_labels)
random_state = check_random_state(random_state)
n_components = n_clusters if n_components is None else n_components
maps = spectral_embedding(affinity, n_components=n_components,
eigen_solver=eigen_solver,
random_state=random_state,
eigen_tol=eigen_tol, drop_first=False)
if assign_labels == 'kmeans':
_, labels, _ = k_means(maps, n_clusters, random_state=random_state,
n_init=n_init)
else:
labels = discretize(maps, random_state=random_state)
return labels
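# Illustrative sketch (not part of the library) of the functional API with a
# hand-built symmetric affinity matrix for two well-separated blobs:
#
#     >>> import numpy as np
#     >>> from sklearn.metrics.pairwise import rbf_kernel
#     >>> rng = np.random.RandomState(0)
#     >>> X = np.vstack([rng.normal(0, .1, (5, 2)), rng.normal(4, .1, (5, 2))])
#     >>> labels = spectral_clustering(rbf_kernel(X, gamma=1.), n_clusters=2,
#     ...                              random_state=0)
#     >>> sorted(set(labels))
#     [0, 1]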
class SpectralClustering(BaseEstimator, ClusterMixin):
"""Apply clustering to a projection to the normalized laplacian.
In practice Spectral Clustering is very useful when the structure of
the individual clusters is highly non-convex or more generally when
a measure of the center and spread of the cluster is not a suitable
description of the complete cluster. For instance when clusters are
    nested circles on the 2D plane.
If affinity is the adjacency matrix of a graph, this method can be
used to find normalized graph cuts.
    When calling ``fit``, an affinity matrix is constructed using either a
    kernel function such as the Gaussian (aka RBF) kernel of the euclidean
    distance ``d(X, X)``::
np.exp(-gamma * d(X,X) ** 2)
or a k-nearest neighbors connectivity matrix.
Alternatively, using ``precomputed``, a user-provided affinity
matrix can be used.
Read more in the :ref:`User Guide <spectral_clustering>`.
Parameters
    ----------
n_clusters : integer, optional
The dimension of the projection subspace.
affinity : string, array-like or callable, default 'rbf'
If a string, this may be one of 'nearest_neighbors', 'precomputed',
'rbf' or one of the kernels supported by
`sklearn.metrics.pairwise_kernels`.
Only kernels that produce similarity scores (non-negative values that
increase with similarity) should be used. This property is not checked
by the clustering algorithm.
gamma : float, default=1.0
Scaling factor of RBF, polynomial, exponential chi^2 and
sigmoid affinity kernel. Ignored for
``affinity='nearest_neighbors'``.
degree : float, default=3
Degree of the polynomial kernel. Ignored by other kernels.
coef0 : float, default=1
Zero coefficient for polynomial and sigmoid kernels.
Ignored by other kernels.
n_neighbors : integer
Number of neighbors to use when constructing the affinity matrix using
the nearest neighbors method. Ignored for ``affinity='rbf'``.
eigen_solver : {None, 'arpack', 'lobpcg', or 'amg'}
The eigenvalue decomposition strategy to use. AMG requires pyamg
to be installed. It can be faster on very large, sparse problems,
but may also lead to instabilities
random_state : int seed, RandomState instance, or None (default)
A pseudo random number generator used for the initialization
of the lobpcg eigen vectors decomposition when eigen_solver == 'amg'
and by the K-Means initialization.
n_init : int, optional, default: 10
Number of time the k-means algorithm will be run with different
centroid seeds. The final results will be the best output of
n_init consecutive runs in terms of inertia.
eigen_tol : float, optional, default: 0.0
Stopping criterion for eigendecomposition of the Laplacian matrix
when using arpack eigen_solver.
assign_labels : {'kmeans', 'discretize'}, default: 'kmeans'
The strategy to use to assign labels in the embedding
space. There are two ways to assign labels after the laplacian
embedding. k-means can be applied and is a popular choice. But it can
also be sensitive to initialization. Discretization is another approach
which is less sensitive to random initialization.
kernel_params : dictionary of string to any, optional
Parameters (keyword arguments) and values for kernel passed as
callable object. Ignored by other kernels.
n_jobs : int, optional (default = 1)
The number of parallel jobs to run.
If ``-1``, then the number of jobs is set to the number of CPU cores.
Attributes
----------
affinity_matrix_ : array-like, shape (n_samples, n_samples)
        Affinity matrix used for clustering. Available only after calling
``fit``.
labels_ :
Labels of each point
Notes
-----
If you have an affinity matrix, such as a distance matrix,
for which 0 means identical elements, and high values means
very dissimilar elements, it can be transformed in a
similarity matrix that is well suited for the algorithm by
applying the Gaussian (RBF, heat) kernel::
np.exp(- dist_matrix ** 2 / (2. * delta ** 2))
Where ``delta`` is a free parameter representing the width of the Gaussian
kernel.
Another alternative is to take a symmetric version of the k
nearest neighbors connectivity matrix of the points.
If the pyamg package is installed, it is used: this greatly
speeds up computation.
References
----------
- Normalized cuts and image segmentation, 2000
Jianbo Shi, Jitendra Malik
http://citeseer.ist.psu.edu/viewdoc/summary?doi=10.1.1.160.2324
- A Tutorial on Spectral Clustering, 2007
Ulrike von Luxburg
http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.165.9323
- Multiclass spectral clustering, 2003
Stella X. Yu, Jianbo Shi
http://www1.icsi.berkeley.edu/~stellayu/publication/doc/2003kwayICCV.pdf
"""
def __init__(self, n_clusters=8, eigen_solver=None, random_state=None,
n_init=10, gamma=1., affinity='rbf', n_neighbors=10,
eigen_tol=0.0, assign_labels='kmeans', degree=3, coef0=1,
kernel_params=None, n_jobs=1):
self.n_clusters = n_clusters
self.eigen_solver = eigen_solver
self.random_state = random_state
self.n_init = n_init
self.gamma = gamma
self.affinity = affinity
self.n_neighbors = n_neighbors
self.eigen_tol = eigen_tol
self.assign_labels = assign_labels
self.degree = degree
self.coef0 = coef0
self.kernel_params = kernel_params
self.n_jobs = n_jobs
def fit(self, X, y=None):
"""Creates an affinity matrix for X using the selected affinity,
then applies spectral clustering to this affinity matrix.
Parameters
----------
X : array-like or sparse matrix, shape (n_samples, n_features)
OR, if affinity==`precomputed`, a precomputed affinity
matrix of shape (n_samples, n_samples)
"""
X = check_array(X, accept_sparse=['csr', 'csc', 'coo'],
dtype=np.float64)
if X.shape[0] == X.shape[1] and self.affinity != "precomputed":
            warnings.warn("The spectral clustering API has changed. ``fit`` "
"now constructs an affinity matrix from data. To use"
" a custom affinity matrix, "
"set ``affinity=precomputed``.")
if self.affinity == 'nearest_neighbors':
connectivity = kneighbors_graph(X, n_neighbors=self.n_neighbors, include_self=True,
n_jobs=self.n_jobs)
self.affinity_matrix_ = 0.5 * (connectivity + connectivity.T)
elif self.affinity == 'precomputed':
self.affinity_matrix_ = X
else:
params = self.kernel_params
if params is None:
params = {}
if not callable(self.affinity):
params['gamma'] = self.gamma
params['degree'] = self.degree
params['coef0'] = self.coef0
self.affinity_matrix_ = pairwise_kernels(X, metric=self.affinity,
filter_params=True,
**params)
random_state = check_random_state(self.random_state)
self.labels_ = spectral_clustering(self.affinity_matrix_,
n_clusters=self.n_clusters,
eigen_solver=self.eigen_solver,
random_state=random_state,
n_init=self.n_init,
eigen_tol=self.eigen_tol,
assign_labels=self.assign_labels)
return self
@property
def _pairwise(self):
return self.affinity == "precomputed"
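if __name__ == '__main__':
    # Quick demonstration sketch (added here for illustration; not shipped
    # with scikit-learn): two interleaved half-moons are non-convex clusters
    # that k-means on the raw coordinates cannot separate, while the
    # spectral embedding can.
    from sklearn.datasets import make_moons
    X, _ = make_moons(n_samples=200, noise=0.05, random_state=0)
    model = SpectralClustering(n_clusters=2, affinity='nearest_neighbors',
                               n_neighbors=10, random_state=0)
    print(model.fit(X).labels_[:10])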
| bsd-3-clause |
alexschiller/osf.io | addons/googledrive/serializer.py | 32 | 1733 | from oauthlib.oauth2 import InvalidGrantError
from addons.base.serializer import StorageAddonSerializer
from website.util import api_url_for
class GoogleDriveSerializer(StorageAddonSerializer):
addon_short_name = 'googledrive'
def credentials_are_valid(self, user_settings, client):
try:
self.node_settings.fetch_access_token()
except (InvalidGrantError, AttributeError):
return False
return True
def serialized_folder(self, node_settings):
return {
'name': node_settings.folder_name,
'path': node_settings.folder_path
}
@property
def addon_serialized_urls(self):
node = self.node_settings.owner
return {
'auth': api_url_for('oauth_connect',
service_name='googledrive'),
'files': node.web_url_for('collect_file_trees'),
'config': node.api_url_for('googledrive_set_config'),
'deauthorize': node.api_url_for('googledrive_deauthorize_node'),
'importAuth': node.api_url_for('googledrive_import_auth'),
'folders': node.api_url_for('googledrive_folder_list'),
'accounts': node.api_url_for('googledrive_account_list')
}
@property
def serialized_node_settings(self):
result = super(GoogleDriveSerializer, self).serialized_node_settings
valid_credentials = True
if self.node_settings.external_account is not None:
try:
self.node_settings.fetch_access_token()
except InvalidGrantError:
valid_credentials = False
result['validCredentials'] = valid_credentials
return {'result': result}
| apache-2.0 |
mollstam/UnrealPy | UnrealPyEmbed/Development/Python/2015.08.07-Python2710-x64-Source-vs2015/Python27/Source/Flask-0.10/flask/testsuite/subclassing.py | 563 | 1214 | # -*- coding: utf-8 -*-
"""
flask.testsuite.subclassing
~~~~~~~~~~~~~~~~~~~~~~~~~~~
Test that certain behavior of flask can be customized by
subclasses.
:copyright: (c) 2011 by Armin Ronacher.
:license: BSD, see LICENSE for more details.
"""
import flask
import unittest
from logging import StreamHandler
from flask.testsuite import FlaskTestCase
from flask._compat import StringIO
class FlaskSubclassingTestCase(FlaskTestCase):
def test_suppressed_exception_logging(self):
class SuppressedFlask(flask.Flask):
def log_exception(self, exc_info):
pass
out = StringIO()
app = SuppressedFlask(__name__)
app.logger_name = 'flask_tests/test_suppressed_exception_logging'
app.logger.addHandler(StreamHandler(out))
@app.route('/')
def index():
1 // 0
rv = app.test_client().get('/')
self.assert_equal(rv.status_code, 500)
self.assert_in(b'Internal Server Error', rv.data)
err = out.getvalue()
self.assert_equal(err, '')
def suite():
suite = unittest.TestSuite()
suite.addTest(unittest.makeSuite(FlaskSubclassingTestCase))
return suite
| mit |
mitocw/edx-platform | openedx/core/djangolib/fields.py | 4 | 1497 | """
Custom Django fields.
"""
from django.db import models
class CharNullField(models.CharField):
"""
CharField that stores NULL but returns ''
"""
description = "CharField that stores NULL but returns ''"
def to_python(self, value):
"""Converts the value into the correct Python object."""
if isinstance(value, models.CharField):
return value
if value is None:
return ""
else:
return value
def get_db_prep_value(self, value, connection, prepared=False):
"""Converts value to a backend-specific value."""
if not prepared:
value = self.get_prep_value(value)
if value == "":
return None
else:
return value
class BigAutoField(models.AutoField):
"""
AutoField that uses BigIntegers.
This exists in Django as of version 1.10.
"""
def db_type(self, connection):
"""
The type of the field to insert into the database.
"""
conn_module = type(connection).__module__
if "mysql" in conn_module:
return "bigint AUTO_INCREMENT"
elif "postgres" in conn_module:
return "bigserial"
else:
return super(BigAutoField, self).db_type(connection)
def rel_db_type(self, connection):
"""
The type to be used by relations pointing to this field.
Not used until Django 1.10.
"""
return "bigint"
| agpl-3.0 |
stefanw/flanker | tests/addresslib/external_dataset_test.py | 9 | 1684 | # coding:utf-8
import re
from .. import *
from nose.tools import assert_equal, assert_not_equal
from flanker.addresslib import address
COMMENT = re.compile(r'''\s*#''')
def test_mailbox_valid_set():
for line in MAILBOX_VALID_TESTS.split('\n'):
# strip line, skip over empty lines
line = line.strip()
if line == '':
continue
# skip over comments or empty lines
match = COMMENT.match(line)
if match:
continue
mbox = address.parse(line)
assert_not_equal(mbox, None)
def test_mailbox_invalid_set():
for line in MAILBOX_INVALID_TESTS.split('\n'):
# strip line, skip over empty lines
line = line.strip()
if line == '':
continue
# skip over comments
match = COMMENT.match(line)
if match:
continue
mbox = address.parse(line)
assert_equal(mbox, None)
def test_url_valid_set():
for line in URL_VALID_TESTS.split('\n'):
# strip line, skip over empty lines
line = line.strip()
if line == '':
continue
# skip over comments or empty lines
match = COMMENT.match(line)
if match:
continue
mbox = address.parse(line)
assert_not_equal(mbox, None)
def test_url_invalid_set():
for line in URL_INVALID_TESTS.split('\n'):
# strip line, skip over empty lines
line = line.strip()
if line == '':
continue
# skip over comments
match = COMMENT.match(line)
if match:
continue
mbox = address.parse(line)
assert_equal(mbox, None)
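# For reference, a hand-run sketch of the API these data sets exercise
# (assuming flanker is installed):
#
#     >>> from flanker.addresslib import address
#     >>> address.parse('John Smith <[email protected]>') is not None
#     True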
| apache-2.0 |
koduj-z-klasa/python101 | bazy/sqlraw/sqlraw05.py | 1 | 2291 | #! /usr/bin/env python2
# -*- coding: utf-8 -*-
import sqlite3
# create a connection to a database stored on disk
# or in memory (':memory:')
con = sqlite3.connect('test.db')
# access columns by index and by name
con.row_factory = sqlite3.Row
# create a cursor object
cur = con.cursor()
# create the tables
cur.execute("DROP TABLE IF EXISTS klasa;")
cur.execute("""
CREATE TABLE IF NOT EXISTS klasa (
id INTEGER PRIMARY KEY ASC,
nazwa varchar(250) NOT NULL,
profil varchar(250) DEFAULT ''
)""")
cur.executescript("""
DROP TABLE IF EXISTS uczen;
CREATE TABLE IF NOT EXISTS uczen (
id INTEGER PRIMARY KEY ASC,
imie varchar(250) NOT NULL,
nazwisko varchar(250) NOT NULL,
klasa_id INTEGER NOT NULL,
FOREIGN KEY(klasa_id) REFERENCES klasa(id)
)""")
# insert single records of data
cur.execute('INSERT INTO klasa VALUES(NULL, ?, ?);', ('1A', 'matematyczny'))
cur.execute('INSERT INTO klasa VALUES(NULL, ?, ?);', ('1B', 'humanistyczny'))
# run an SQL query that fetches the id of class "1A" from the "klasa" table.
cur.execute('SELECT id FROM klasa WHERE nazwa = ?', ('1A',))
klasa_id = cur.fetchone()[0]
# the "uczniowie" tuple holds tuples with each student's data
uczniowie = (
(None, 'Tomasz', 'Nowak', klasa_id),
(None, 'Jan', 'Kos', klasa_id),
(None, 'Piotr', 'Kowalski', klasa_id)
)
# insert many records at once
cur.executemany('INSERT INTO uczen VALUES(?,?,?,?)', uczniowie)
# commit the changes to the database
con.commit()
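# Side note (illustrative sketch, not part of the original tutorial): because
# con.row_factory = sqlite3.Row was set above, fetched rows support both
# positional and name-based access:
#
#     cur.execute('SELECT imie, nazwisko FROM uczen WHERE id=?', (1,))
#     row = cur.fetchone()
#     assert row[0] == row['imie']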
# fetching data from the database
def czytajdane():
"""Funkcja pobiera z bazy i wyświetla informacje o uczniach."""
cur.execute(
"""
SELECT uczen.id,imie,nazwisko,nazwa FROM uczen,klasa
WHERE uczen.klasa_id=klasa.id
""")
uczniowie = cur.fetchall()
for uczen in uczniowie:
print uczen['id'], uczen['imie'], uczen['nazwisko'], uczen['nazwa']
print ""
czytajdane()
# change the class of the student with id 2
cur.execute('SELECT id FROM klasa WHERE nazwa = ?', ('1B',))
klasa_id = cur.fetchone()[0]
cur.execute('UPDATE uczen SET klasa_id=? WHERE id=?', (klasa_id, 2))
# delete the student with id 3
cur.execute('DELETE FROM uczen WHERE id=?', (3,))
czytajdane()
con.close()
| mit |
quickresolve/accel.ai | flask-aws/lib/python2.7/site-packages/requests/packages/urllib3/request.py | 714 | 5988 | from __future__ import absolute_import
try:
from urllib.parse import urlencode
except ImportError:
from urllib import urlencode
from .filepost import encode_multipart_formdata
__all__ = ['RequestMethods']
class RequestMethods(object):
"""
Convenience mixin for classes who implement a :meth:`urlopen` method, such
as :class:`~urllib3.connectionpool.HTTPConnectionPool` and
:class:`~urllib3.poolmanager.PoolManager`.
Provides behavior for making common types of HTTP request methods and
decides which type of request field encoding to use.
Specifically,
:meth:`.request_encode_url` is for sending requests whose fields are
encoded in the URL (such as GET, HEAD, DELETE).
:meth:`.request_encode_body` is for sending requests whose fields are
encoded in the *body* of the request using multipart or www-form-urlencoded
(such as for POST, PUT, PATCH).
:meth:`.request` is for making any kind of request, it will look up the
appropriate encoding format and use one of the above two methods to make
the request.
Initializer parameters:
:param headers:
Headers to include with all requests, unless other headers are given
explicitly.
"""
_encode_url_methods = set(['DELETE', 'GET', 'HEAD', 'OPTIONS'])
def __init__(self, headers=None):
self.headers = headers or {}
def urlopen(self, method, url, body=None, headers=None,
encode_multipart=True, multipart_boundary=None,
**kw): # Abstract
        raise NotImplementedError("Classes extending RequestMethods must implement "
"their own ``urlopen`` method.")
def request(self, method, url, fields=None, headers=None, **urlopen_kw):
"""
Make a request using :meth:`urlopen` with the appropriate encoding of
``fields`` based on the ``method`` used.
This is a convenience method that requires the least amount of manual
effort. It can be used in most situations, while still having the
option to drop down to more specific methods when necessary, such as
:meth:`request_encode_url`, :meth:`request_encode_body`,
or even the lowest level :meth:`urlopen`.
"""
method = method.upper()
if method in self._encode_url_methods:
return self.request_encode_url(method, url, fields=fields,
headers=headers,
**urlopen_kw)
else:
return self.request_encode_body(method, url, fields=fields,
headers=headers,
**urlopen_kw)
def request_encode_url(self, method, url, fields=None, headers=None,
**urlopen_kw):
"""
Make a request using :meth:`urlopen` with the ``fields`` encoded in
the url. This is useful for request methods like GET, HEAD, DELETE, etc.
"""
if headers is None:
headers = self.headers
extra_kw = {'headers': headers}
extra_kw.update(urlopen_kw)
if fields:
url += '?' + urlencode(fields)
return self.urlopen(method, url, **extra_kw)
def request_encode_body(self, method, url, fields=None, headers=None,
encode_multipart=True, multipart_boundary=None,
**urlopen_kw):
"""
Make a request using :meth:`urlopen` with the ``fields`` encoded in
the body. This is useful for request methods like POST, PUT, PATCH, etc.
When ``encode_multipart=True`` (default), then
:meth:`urllib3.filepost.encode_multipart_formdata` is used to encode
the payload with the appropriate content type. Otherwise
:meth:`urllib.urlencode` is used with the
'application/x-www-form-urlencoded' content type.
Multipart encoding must be used when posting files, and it's reasonably
safe to use it in other times too. However, it may break request
signing, such as with OAuth.
Supports an optional ``fields`` parameter of key/value strings AND
key/filetuple. A filetuple is a (filename, data, MIME type) tuple where
the MIME type is optional. For example::
fields = {
'foo': 'bar',
'fakefile': ('foofile.txt', 'contents of foofile'),
'realfile': ('barfile.txt', open('realfile').read()),
'typedfile': ('bazfile.bin', open('bazfile').read(),
'image/jpeg'),
'nonamefile': 'contents of nonamefile field',
}
When uploading a file, providing a filename (the first parameter of the
        tuple) is optional but recommended to best mimic behavior of browsers.
Note that if ``headers`` are supplied, the 'Content-Type' header will
be overwritten because it depends on the dynamic random boundary string
which is used to compose the body of the request. The random boundary
string can be explicitly set with the ``multipart_boundary`` parameter.
"""
if headers is None:
headers = self.headers
extra_kw = {'headers': {}}
if fields:
if 'body' in urlopen_kw:
raise TypeError(
"request got values for both 'fields' and 'body', can only specify one.")
if encode_multipart:
body, content_type = encode_multipart_formdata(fields, boundary=multipart_boundary)
else:
body, content_type = urlencode(fields), 'application/x-www-form-urlencoded'
extra_kw['body'] = body
extra_kw['headers'] = {'Content-Type': content_type}
extra_kw['headers'].update(headers)
extra_kw.update(urlopen_kw)
return self.urlopen(method, url, **extra_kw)
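# Minimal usage sketch (illustrative; not part of this module; assumes
# network access). PoolManager mixes RequestMethods in, so one ``request``
# call picks the encoding path that matches the HTTP verb:
#
#     >>> import urllib3
#     >>> http = urllib3.PoolManager()
#     >>> r = http.request('GET', 'http://example.com/', fields={'q': 'x'})
#     >>> r.status   # for GET the fields were urlencoded into the URL
#     200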
| mit |
partofthething/home-assistant | tests/components/template/test_weather.py | 1 | 1945 | """The tests for the Template Weather platform."""
from homeassistant.components.weather import (
ATTR_WEATHER_HUMIDITY,
ATTR_WEATHER_PRESSURE,
ATTR_WEATHER_TEMPERATURE,
ATTR_WEATHER_WIND_SPEED,
DOMAIN,
)
from homeassistant.setup import async_setup_component
async def test_template_state_text(hass):
"""Test the state text of a template."""
await async_setup_component(
hass,
DOMAIN,
{
"weather": [
{"weather": {"platform": "demo"}},
{
"platform": "template",
"name": "test",
"condition_template": "sunny",
"forecast_template": "{{ states.weather.demo.attributes.forecast }}",
"temperature_template": "{{ states('sensor.temperature') | float }}",
"humidity_template": "{{ states('sensor.humidity') | int }}",
"pressure_template": "{{ states('sensor.pressure') }}",
"wind_speed_template": "{{ states('sensor.windspeed') }}",
},
]
},
)
await hass.async_block_till_done()
await hass.async_start()
await hass.async_block_till_done()
hass.states.async_set("sensor.temperature", 22.3)
await hass.async_block_till_done()
hass.states.async_set("sensor.humidity", 60)
await hass.async_block_till_done()
hass.states.async_set("sensor.pressure", 1000)
await hass.async_block_till_done()
hass.states.async_set("sensor.windspeed", 20)
await hass.async_block_till_done()
state = hass.states.get("weather.test")
assert state is not None
assert state.state == "sunny"
data = state.attributes
assert data.get(ATTR_WEATHER_TEMPERATURE) == 22.3
assert data.get(ATTR_WEATHER_HUMIDITY) == 60
assert data.get(ATTR_WEATHER_PRESSURE) == 1000
assert data.get(ATTR_WEATHER_WIND_SPEED) == 20
| mit |
Donkyhotay/pgu | examples/gui10.py | 13 | 4945 | """<title>Integration with a Game</title>
For games, it is usually preferrable to not have your game within
a GUI framework. This GUI framework can be placed within your game.
"""
import time
import random
import pygame
from pygame.locals import *
# the following line is not needed if pgu is installed
import sys; sys.path.insert(0, "..")
from pgu import gui
from gui7 import ColorDialog
# The maximum frame-rate
FPS = 30
WIDTH,HEIGHT = 640,480
##You can initialize the screen yourself.
##::
screen = pygame.display.set_mode((640,480),SWSURFACE)
##
class StarControl(gui.Table):
def __init__(self,**params):
gui.Table.__init__(self,**params)
def fullscreen_changed(btn):
#pygame.display.toggle_fullscreen()
print("TOGGLE FULLSCREEN")
def stars_changed(slider):
n = slider.value - len(stars)
if n < 0:
for i in range(n,0):
stars.pop()
else:
for i in range(0,n):
stars.append([random.randrange(-WIDTH*span,WIDTH*span),
random.randrange(-HEIGHT*span,HEIGHT*span),
random.randrange(1,dist)])
fg = (255,255,255)
self.tr()
self.td(gui.Label("Phil's Pygame GUI",color=fg),colspan=2)
self.tr()
self.td(gui.Label("Speed: ",color=fg),align=1)
e = gui.HSlider(100,-500,500,size=20,width=100,height=16,name='speed')
self.td(e)
self.tr()
self.td(gui.Label("Size: ",color=fg),align=1)
e = gui.HSlider(2,1,5,size=20,width=100,height=16,name='size')
self.td(e)
self.tr()
self.td(gui.Label("Quantity: ",color=fg),align=1)
e = gui.HSlider(100,1,1000,size=20,width=100,height=16,name='quantity')
e.connect(gui.CHANGE, stars_changed, e)
self.td(e)
self.tr()
self.td(gui.Label("Color: ",color=fg),align=1)
default = "#ffffff"
color = gui.Color(default,width=64,height=10,name='color')
color_d = ColorDialog(default)
color.connect(gui.CLICK,color_d.open,None)
self.td(color)
def update_col():
color.value = color_d.value
color_d.connect(gui.CHANGE,update_col)
btn = gui.Switch(value=False,name='fullscreen')
btn.connect(gui.CHANGE, fullscreen_changed, btn)
self.tr()
self.td(gui.Label("Full Screen: ",color=fg),align=1)
self.td(btn)
self.tr()
self.td(gui.Label("Warp Speed: ",color=fg),align=1)
self.td(gui.Switch(value=False,name='warp'))
##Using App instead of Desktop removes the GUI background. Note the call to app.init()
##::
form = gui.Form()
app = gui.App()
starCtrl = StarControl()
c = gui.Container(align=-1,valign=-1)
c.add(starCtrl,0,0)
app.init(c)
##
dist = 8192
span = 10
stars = []
def reset():
global stars
stars = []
for i in range(0,form['quantity'].value):
stars.append([random.randrange(-WIDTH*span,WIDTH*span),
random.randrange(-HEIGHT*span,HEIGHT*span),
random.randrange(1,dist)])
def render(dt):
speed = form['speed'].value*10
size = form['size'].value
color = form['color'].value
warp = form['warp'].value
colors = []
for i in range(256,0,-1):
colors.append((color[0]*i/256,color[1]*i/256,color[2]*i/256))
n = 0
for x,y,z in stars:
if warp:
z1 = max(1,z + speed*2)
x1 = x*256/z1
y1 = y*256/z1
xx1,yy1 = x1+WIDTH/2,y1+HEIGHT/2
x = x*256/z
y = y*256/z
xx,yy = x+WIDTH/2,y+HEIGHT/2
c = min(255,z * 255 / dist)
col = colors[int(c)]
if warp:
pygame.draw.line(screen,col,
(int(xx1),int(yy1)),
(int(xx),int(yy)),size)
pygame.draw.circle(screen,col,(int(xx),int(yy)),size)
ch = 0
z -= speed*dt
if z <= 0:
ch = 1
z += dist
if z > dist:
ch = 1
z -= dist
if ch:
stars[n][0] = random.randrange(-WIDTH*span,WIDTH*span)
stars[n][1] = random.randrange(-HEIGHT*span,HEIGHT*span)
stars[n][2] = z
n += 1
##You can include your own run loop.
##::
reset()
clock = pygame.time.Clock()
done = False
while not done:
for e in pygame.event.get():
        if e.type == QUIT:
            done = True
        elif e.type == KEYDOWN and e.key == K_ESCAPE:
done = True
else:
app.event(e)
# Clear the screen and render the stars
dt = clock.tick(FPS)/1000.0
screen.fill((0,0,0))
render(dt)
app.paint()
pygame.display.flip()
| lgpl-2.1 |
smmribeiro/intellij-community | python/helpers/pydev/third_party/pep8/lib2to3/lib2to3/fixes/fix_urllib.py | 325 | 8385 | """Fix changes imports of urllib which are now incompatible.
This is rather similar to fix_imports, but because of the more
complex nature of the fixing for urllib, it has its own fixer.
"""
# Author: Nick Edds
# Local imports
from lib2to3.fixes.fix_imports import alternates, FixImports
from lib2to3 import fixer_base
from lib2to3.fixer_util import (Name, Comma, FromImport, Newline,
find_indentation, Node, syms)
MAPPING = {"urllib": [
("urllib.request",
["URLopener", "FancyURLopener", "urlretrieve",
"_urlopener", "urlopen", "urlcleanup",
"pathname2url", "url2pathname"]),
("urllib.parse",
["quote", "quote_plus", "unquote", "unquote_plus",
"urlencode", "splitattr", "splithost", "splitnport",
"splitpasswd", "splitport", "splitquery", "splittag",
"splittype", "splituser", "splitvalue", ]),
("urllib.error",
["ContentTooShortError"])],
"urllib2" : [
("urllib.request",
["urlopen", "install_opener", "build_opener",
"Request", "OpenerDirector", "BaseHandler",
"HTTPDefaultErrorHandler", "HTTPRedirectHandler",
"HTTPCookieProcessor", "ProxyHandler",
"HTTPPasswordMgr",
"HTTPPasswordMgrWithDefaultRealm",
"AbstractBasicAuthHandler",
"HTTPBasicAuthHandler", "ProxyBasicAuthHandler",
"AbstractDigestAuthHandler",
"HTTPDigestAuthHandler", "ProxyDigestAuthHandler",
"HTTPHandler", "HTTPSHandler", "FileHandler",
"FTPHandler", "CacheFTPHandler",
"UnknownHandler"]),
("urllib.error",
["URLError", "HTTPError"]),
]
}
# Duplicate the url parsing functions for urllib2.
MAPPING["urllib2"].append(MAPPING["urllib"][1])
def build_pattern():
bare = set()
for old_module, changes in MAPPING.items():
for change in changes:
new_module, members = change
members = alternates(members)
yield """import_name< 'import' (module=%r
| dotted_as_names< any* module=%r any* >) >
""" % (old_module, old_module)
yield """import_from< 'from' mod_member=%r 'import'
( member=%s | import_as_name< member=%s 'as' any > |
import_as_names< members=any* >) >
""" % (old_module, members, members)
yield """import_from< 'from' module_star=%r 'import' star='*' >
""" % old_module
yield """import_name< 'import'
dotted_as_name< module_as=%r 'as' any > >
""" % old_module
# bare_with_attr has a special significance for FixImports.match().
yield """power< bare_with_attr=%r trailer< '.' member=%s > any* >
""" % (old_module, members)
class FixUrllib(FixImports):
def build_pattern(self):
return "|".join(build_pattern())
def transform_import(self, node, results):
"""Transform for the basic import case. Replaces the old
import name with a comma separated list of its
replacements.
"""
import_mod = results.get("module")
pref = import_mod.prefix
names = []
# create a Node list of the replacement modules
for name in MAPPING[import_mod.value][:-1]:
names.extend([Name(name[0], prefix=pref), Comma()])
names.append(Name(MAPPING[import_mod.value][-1][0], prefix=pref))
import_mod.replace(names)
def transform_member(self, node, results):
"""Transform for imports of specific module elements. Replaces
the module to be imported from with the appropriate new
module.
"""
mod_member = results.get("mod_member")
pref = mod_member.prefix
member = results.get("member")
# Simple case with only a single member being imported
if member:
# this may be a list of length one, or just a node
if isinstance(member, list):
member = member[0]
new_name = None
for change in MAPPING[mod_member.value]:
if member.value in change[1]:
new_name = change[0]
break
if new_name:
mod_member.replace(Name(new_name, prefix=pref))
else:
self.cannot_convert(node, "This is an invalid module element")
# Multiple members being imported
else:
# a dictionary for replacements, order matters
modules = []
mod_dict = {}
members = results["members"]
for member in members:
# we only care about the actual members
if member.type == syms.import_as_name:
as_name = member.children[2].value
member_name = member.children[0].value
else:
member_name = member.value
as_name = None
if member_name != u",":
for change in MAPPING[mod_member.value]:
if member_name in change[1]:
if change[0] not in mod_dict:
modules.append(change[0])
mod_dict.setdefault(change[0], []).append(member)
new_nodes = []
indentation = find_indentation(node)
first = True
def handle_name(name, prefix):
if name.type == syms.import_as_name:
kids = [Name(name.children[0].value, prefix=prefix),
name.children[1].clone(),
name.children[2].clone()]
return [Node(syms.import_as_name, kids)]
return [Name(name.value, prefix=prefix)]
for module in modules:
elts = mod_dict[module]
names = []
for elt in elts[:-1]:
names.extend(handle_name(elt, pref))
names.append(Comma())
names.extend(handle_name(elts[-1], pref))
new = FromImport(module, names)
if not first or node.parent.prefix.endswith(indentation):
new.prefix = indentation
new_nodes.append(new)
first = False
if new_nodes:
nodes = []
for new_node in new_nodes[:-1]:
nodes.extend([new_node, Newline()])
nodes.append(new_nodes[-1])
node.replace(nodes)
else:
self.cannot_convert(node, "All module elements are invalid")
def transform_dot(self, node, results):
"""Transform for calls to module members in code."""
module_dot = results.get("bare_with_attr")
member = results.get("member")
new_name = None
if isinstance(member, list):
member = member[0]
for change in MAPPING[module_dot.value]:
if member.value in change[1]:
new_name = change[0]
break
if new_name:
module_dot.replace(Name(new_name,
prefix=module_dot.prefix))
else:
self.cannot_convert(node, "This is an invalid module element")
def transform(self, node, results):
if results.get("module"):
self.transform_import(node, results)
elif results.get("mod_member"):
self.transform_member(node, results)
elif results.get("bare_with_attr"):
self.transform_dot(node, results)
# Renaming and star imports are not supported for these modules.
elif results.get("module_star"):
self.cannot_convert(node, "Cannot handle star imports.")
elif results.get("module_as"):
self.cannot_convert(node, "This module is now multiple modules")
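# Illustrative sketch (not part of the fixer) of driving this fix through the
# lib2to3 refactoring engine; given MAPPING above, a bare import would be
# expected to expand roughly as follows:
#
#     >>> from lib2to3.refactor import RefactoringTool
#     >>> rt = RefactoringTool(['lib2to3.fixes.fix_urllib'])
#     >>> str(rt.refactor_string(u'import urllib2\n', '<test>'))
#     'import urllib.request, urllib.error, urllib.parse\n'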
| apache-2.0 |
pombredanne/http-repo.gem5.org-gem5- | src/cpu/o3/probe/SimpleTrace.py | 59 | 2231 | # Copyright (c) 2013 ARM Limited
# All rights reserved.
#
# The license below extends only to copyright in the software and shall
# not be construed as granting a license to any other intellectual
# property including but not limited to intellectual property relating
# to a hardware implementation of the functionality of the software
# licensed hereunder. You may use the software subject to the license
# terms below provided that you ensure that this notice is replicated
# unmodified and in its entirety in all distributions of the software,
# modified or unmodified, in source code or in binary form.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Matt Horsnell
from Probe import *
class SimpleTrace(ProbeListenerObject):
type = 'SimpleTrace'
cxx_header = 'cpu/o3/probe/simple_trace.hh'
| bsd-3-clause |
groovecoder/kuma | vendor/packages/translate/storage/bundleprojstore.py | 24 | 10543 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2010 Zuza Software Foundation
#
# This file is part of the Translate Toolkit.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, see <http://www.gnu.org/licenses/>.
import os
import shutil
import tempfile
from zipfile import ZipFile
from translate.storage.projstore import *
__all__ = ['BundleProjectStore', 'InvalidBundleError']
class InvalidBundleError(Exception):
pass
class BundleProjectStore(ProjectStore):
"""Represents a translate project bundle (zip archive)."""
# INITIALIZERS #
def __init__(self, fname):
super(BundleProjectStore, self).__init__()
self._tempfiles = {}
if fname and os.path.isfile(fname):
self.load(fname)
else:
self.zip = ZipFile(fname, 'w')
self.save()
self.zip.close()
self.zip = ZipFile(fname, 'a')
# CLASS METHODS #
@classmethod
def from_project(cls, proj, fname=None):
if fname is None:
fname = 'bundle.zip'
bundle = BundleProjectStore(fname)
for fn in proj.sourcefiles:
bundle.append_sourcefile(proj.get_file(fn))
for fn in proj.transfiles:
bundle.append_transfile(proj.get_file(fn))
for fn in proj.targetfiles:
bundle.append_targetfile(proj.get_file(fn))
bundle.settings = proj.settings.copy()
bundle.save()
return bundle
# METHODS #
def append_file(self, afile, fname, ftype='trans', delete_orig=False):
"""Append the given file to the project with the given filename, marked
to be of type ``ftype`` ('src', 'trans', 'tgt').
:param delete_orig: If ``True``, as set by
:meth:`~translate.storage.Project.convert_forward`,
``afile`` is deleted after appending, if
possible.
.. note:: For this implementation, the appended file will be deleted
from disk if ``delete_orig`` is ``True``.
"""
if fname and fname in self.zip.namelist():
raise ValueError("File already in bundle archive: %s" % (fname))
if not fname and isinstance(afile, basestring) and afile in self.zip.namelist():
raise ValueError("File already in bundle archive: %s" % (afile))
afile, fname = super(BundleProjectStore, self).append_file(afile, fname, ftype)
self._zip_add(fname, afile)
if delete_orig and hasattr(afile, 'name') and afile.name not in self._tempfiles:
try:
os.unlink(afile.name)
except Exception:
pass
return self.get_file(fname), fname
def remove_file(self, fname, ftype=None):
"""Remove the file with the given project name from the project."""
super(BundleProjectStore, self).remove_file(fname, ftype)
self._zip_delete([fname])
tempfiles = [tmpf for tmpf, prjf in self._tempfiles.iteritems() if prjf == fname]
if tempfiles:
for tmpf in tempfiles:
try:
os.unlink(tmpf)
except Exception:
pass
del self._tempfiles[tmpf]
def close(self):
super(BundleProjectStore, self).close()
self.cleanup()
self.zip.close()
def cleanup(self):
"""Clean up our mess: remove temporary files."""
for tempfname in self._tempfiles:
if os.path.isfile(tempfname):
os.unlink(tempfname)
self._tempfiles = {}
def get_file(self, fname):
"""Retrieve a project file (source, translation or target file) from the
project archive."""
retfile = None
if fname in self._files or fname in self.zip.namelist():
# Check if the file has not already been extracted to a temp file
tempfname = [tfn for tfn in self._tempfiles if self._tempfiles[tfn] == fname]
if tempfname and os.path.isfile(tempfname[0]):
tempfname = tempfname[0]
else:
tempfname = ''
if not tempfname:
# Extract the file to a temporary file
zfile = self.zip.open(fname)
tempfname = os.path.split(fname)[-1]
tempfd, tempfname = tempfile.mkstemp(suffix='_' + tempfname)
os.close(tempfd)
open(tempfname, 'w').write(zfile.read())
retfile = open(tempfname)
self._tempfiles[tempfname] = fname
if not retfile:
raise FileNotInProjectError(fname)
return retfile
def get_proj_filename(self, realfname):
"""Try and find a project file name for the given real file name."""
try:
fname = super(BundleProjectStore, self).get_proj_filename(realfname)
except ValueError as ve:
fname = None
if fname:
return fname
if realfname in self._tempfiles:
return self._tempfiles[realfname]
raise ValueError('Real file not in project store: %s' % (realfname))
def load(self, zipname):
"""Load the bundle project from the zip file of the given name."""
self.zip = ZipFile(zipname, mode='a')
self._load_settings()
append_section = {
'sources': self._sourcefiles.append,
'targets': self._targetfiles.append,
'transfiles': self._transfiles.append,
}
for section in ('sources', 'targets', 'transfiles'):
if section in self.settings:
for fname in self.settings[section]:
append_section[section](fname)
self._files[fname] = None
def save(self, filename=None):
"""Save all project files to the bundle zip file."""
self._update_from_tempfiles()
if filename:
newzip = ZipFile(filename, 'w')
else:
newzip = self._create_temp_zipfile()
# Write project file for the new zip bundle
newzip.writestr('project.xtp', self._generate_settings())
# Copy project files from project to the new zip file
project_files = self._sourcefiles + self._transfiles + self._targetfiles
for fname in project_files:
newzip.writestr(fname, self.get_file(fname).read())
# Copy any extra (non-project) files from the current zip
for fname in self.zip.namelist():
if fname in project_files or fname == 'project.xtp':
continue
newzip.writestr(fname, self.zip.read(fname))
self._replace_project_zip(newzip)
def update_file(self, pfname, infile):
"""Updates the file with the given project file name with the contents
of ``infile``.
        :returns: the results from :meth:`BundleProjectStore.append_file`."""
if pfname not in self._files:
raise FileNotInProjectError(pfname)
if pfname not in self.zip.namelist():
return super(BundleProjectStore, self).update_file(pfname, infile)
self._zip_delete([pfname])
self._zip_add(pfname, infile)
def _load_settings(self):
"""Grab the project.xtp file from the zip file and load it."""
if 'project.xtp' not in self.zip.namelist():
raise InvalidBundleError('Not a translate project bundle')
super(BundleProjectStore, self)._load_settings(self.zip.open('project.xtp').read())
def _create_temp_zipfile(self):
"""Create a new zip file with a temporary file name (with mode 'w')."""
newzipfd, newzipfname = tempfile.mkstemp(prefix='translate_bundle', suffix='.zip')
os.close(newzipfd)
return ZipFile(newzipfname, 'w')
def _replace_project_zip(self, zfile):
"""Replace the currently used zip file (``self.zip``) with the given zip
file. Basically, ``os.rename(zfile.filename, self.zip.filename)``."""
if not zfile.fp.closed:
zfile.close()
if not self.zip.fp.closed:
self.zip.close()
shutil.move(zfile.filename, self.zip.filename)
self.zip = ZipFile(self.zip.filename, mode='a')
def _update_from_tempfiles(self):
"""Update project files from temporary files."""
for tempfname in self._tempfiles:
tmp = open(tempfname)
self.update_file(self._tempfiles[tempfname], tmp)
if not tmp.closed:
tmp.close()
def _zip_add(self, pfname, infile):
"""Add the contents of ``infile`` to the zip with file name ``pfname``."""
if hasattr(infile, 'seek'):
infile.seek(0)
self.zip.writestr(pfname, infile.read())
# Clear the cached file object to force the file to be read from the
# zip file.
self._files[pfname] = None
def _zip_delete(self, fnames):
"""Delete the files with the given names from the zip file (``self.zip``)."""
# Sanity checking
if not isinstance(fnames, (list, tuple)):
raise ValueError("fnames must be list or tuple: %s" % (fnames))
if not self.zip:
raise ValueError("No zip file to work on")
zippedfiles = self.zip.namelist()
for fn in fnames:
if fn not in zippedfiles:
raise KeyError("File not in zip archive: %s" % (fn))
newzip = self._create_temp_zipfile()
newzip.writestr('project.xtp', self._generate_settings())
for fname in zippedfiles:
# Copy all files from self.zip that are not project.xtp (already
# in the new zip file) or in fnames (they are to be removed, after
            # all).
if fname in fnames or fname == 'project.xtp':
continue
newzip.writestr(fname, self.zip.read(fname))
self._replace_project_zip(newzip)
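# Minimal usage sketch (hypothetical file names; not part of the module):
#
#     >>> store = BundleProjectStore('bundle.zip')
#     >>> afile, fname = store.append_file(open('messages.po'),
#     ...                                  'po/messages.po', ftype='trans')
#     >>> store.save()
#     >>> store.close()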
| mpl-2.0 |
hajuuk/R7000 | ap/gpl/samba-3.0.13/source/stf/smbcontrol.py | 137 | 7998 | #!/usr/bin/python
#
# Test for smbcontrol command line argument handling.
#
import comfychair
class NoArgs(comfychair.TestCase):
"""Test no arguments produces usage message."""
def runtest(self):
out = self.runcmd("smbcontrol", expectedResult = 1)
self.assert_re_match("Usage: smbcontrol", out[1])
class OneArg(comfychair.TestCase):
"""Test single argument produces usage message."""
def runtest(self):
out = self.runcmd("smbcontrol foo", expectedResult = 1)
self.assert_re_match("Usage: smbcontrol", out[1])
class SmbdDest(comfychair.TestCase):
"""Test the broadcast destination 'smbd'."""
def runtest(self):
out = self.runcmd("smbcontrol smbd noop")
class NmbdDest(comfychair.TestCase):
"""Test the destination 'nmbd'."""
def runtest(self):
# We need a way to start/stop/whatever nmbd
raise comfychair.NotRunError, "not implemented"
class PidDest(comfychair.TestCase):
"""Test a pid number destination'."""
def runtest(self):
out = self.runcmd("smbcontrol 1234 noop")
class SelfDest(comfychair.TestCase):
"""Test the destination 'self'."""
def runtest(self):
out = self.runcmd("smbcontrol self noop")
class WinbinddDest(comfychair.TestCase):
"""Test the destination 'winbindd'."""
def runtest(self):
# We need a way to start/stop/whatever winbindd
raise comfychair.NotRunError, "not implemented"
class BadDest(comfychair.TestCase):
"""Test a bad destination."""
def runtest(self):
out = self.runcmd("smbcontrol foo noop", expectedResult = 1)
class BadCmd(comfychair.TestCase):
"""Test a bad command."""
def runtest(self):
out = self.runcmd("smbcontrol self spottyfoot", expectedResult = 1)
self.assert_re_match("smbcontrol: unknown command", out[1]);
class NoArgCmdTest(comfychair.TestCase):
"""A test class that tests a command with no argument."""
def runtest(self):
self.require_root()
out = self.runcmd("smbcontrol self %s" % self.cmd)
out = self.runcmd("smbcontrol self %s spottyfoot" % self.cmd,
expectedResult = 1)
class ForceElection(NoArgCmdTest):
"""Test a force-election message."""
def setup(self):
self.cmd = "force-election"
class SamSync(NoArgCmdTest):
"""Test a samsync message."""
def setup(self):
self.cmd = "samsync"
class SamRepl(NoArgCmdTest):
"""Test a samrepl message."""
def setup(self):
self.cmd = "samrepl"
class DmallocChanged(NoArgCmdTest):
"""Test a dmalloc-changed message."""
def setup(self):
self.cmd = "dmalloc-log-changed"
class DmallocMark(NoArgCmdTest):
"""Test a dmalloc-mark message."""
def setup(self):
self.cmd = "dmalloc-mark"
class Shutdown(NoArgCmdTest):
"""Test a shutdown message."""
def setup(self):
self.cmd = "shutdown"
class Ping(NoArgCmdTest):
"""Test a ping message."""
def setup(self):
self.cmd = "ping"
class Debuglevel(NoArgCmdTest):
"""Test a debuglevel message."""
def setup(self):
self.cmd = "debuglevel"
class OneArgCmdTest(comfychair.TestCase):
"""A test class that tests a command with one argument."""
def runtest(self):
self.require_root()
out = self.runcmd("smbcontrol self %s spottyfoot" % self.cmd)
out = self.runcmd("smbcontrol self %s" % self.cmd, expectedResult = 1)
class DrvUpgrade(OneArgCmdTest):
"""Test driver upgrade message."""
def setup(self):
self.cmd = "drvupgrade"
class CloseShare(OneArgCmdTest):
"""Test close share message."""
def setup(self):
self.cmd = "close-share"
class Debug(OneArgCmdTest):
"""Test a debug message."""
def setup(self):
self.cmd = "debug"
class PrintNotify(comfychair.TestCase):
"""Test print notification commands."""
def runtest(self):
# No subcommand
out = self.runcmd("smbcontrol self printnotify", expectedResult = 1)
self.assert_re_match("Must specify subcommand", out[1]);
# Invalid subcommand name
out = self.runcmd("smbcontrol self printnotify spottyfoot",
expectedResult = 1)
self.assert_re_match("Invalid subcommand", out[1]);
# Queue commands
for cmd in ["queuepause", "queueresume"]:
out = self.runcmd("smbcontrol self printnotify %s" % cmd,
expectedResult = 1)
self.assert_re_match("Usage:", out[1])
out = self.runcmd("smbcontrol self printnotify %s spottyfoot"
% cmd)
# Job commands
for cmd in ["jobpause", "jobresume", "jobdelete"]:
out = self.runcmd("smbcontrol self printnotify %s" % cmd,
expectedResult = 1)
self.assert_re_match("Usage:", out[1])
out = self.runcmd("smbcontrol self printnotify %s spottyfoot"
% cmd, expectedResult = 1)
self.assert_re_match("Usage:", out[1])
out = self.runcmd("smbcontrol self printnotify %s spottyfoot 123"
% cmd)
# Printer properties
out = self.runcmd("smbcontrol self printnotify printer",
expectedResult = 1)
self.assert_re_match("Usage", out[1])
out = self.runcmd("smbcontrol self printnotify printer spottyfoot",
expectedResult = 1)
self.assert_re_match("Usage", out[1])
for cmd in ["comment", "port", "driver"]:
out = self.runcmd("smbcontrol self printnotify printer spottyfoot "
"%s" % cmd, expectedResult = 1)
self.assert_re_match("Usage", out[1])
out = self.runcmd("smbcontrol self printnotify printer spottyfoot "
"%s value" % cmd)
class Profile(comfychair.TestCase):
"""Test setting the profiling level."""
def runtest(self):
self.require_root()
out = self.runcmd("smbcontrol self profile", expectedResult = 1)
self.assert_re_match("Usage", out[1])
out = self.runcmd("smbcontrol self profile spottyfoot",
expectedResult = 1)
self.assert_re_match("Unknown", out[1])
for cmd in ["off", "count", "on", "flush"]:
out = self.runcmd("smbcontrol self profile %s" % cmd)
class ProfileLevel(comfychair.TestCase):
"""Test requesting the current profiling level."""
def runtest(self):
self.require_root()
out = self.runcmd("smbcontrol self profilelevel spottyfoot",
expectedResult = 1)
self.assert_re_match("Usage", out[1])
out = self.runcmd("smbcontrol self profilelevel")
class TimeoutArg(comfychair.TestCase):
"""Test the --timeout argument."""
def runtest(self):
out = self.runcmd("smbcontrol --timeout 5 self noop")
out = self.runcmd("smbcontrol --timeout spottyfoot self noop",
expectedResult = 1)
class ConfigFileArg(comfychair.TestCase):
"""Test the --configfile argument."""
def runtest(self):
out = self.runcmd("smbcontrol --configfile /dev/null self noop")
class BogusArg(comfychair.TestCase):
"""Test a bogus command line argument."""
def runtest(self):
out = self.runcmd("smbcontrol --bogus self noop", expectedResult = 1)
tests = [NoArgs, OneArg, SmbdDest, NmbdDest, WinbinddDest, PidDest,
SelfDest, BadDest, BadCmd, Debug, ForceElection, SamSync,
SamRepl, DmallocMark, DmallocChanged, Shutdown, DrvUpgrade,
CloseShare, Ping, Debuglevel, PrintNotify, Profile, ProfileLevel,
TimeoutArg, ConfigFileArg, BogusArg]
# Handle execution of this file as a main program
if __name__ == '__main__':
comfychair.main(tests)
| gpl-2.0 |
atlassian/boto | boto/sdb/db/sequence.py | 153 | 8223 | # Copyright (c) 2010 Chris Moyer http://coredumped.org/
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
from boto.exception import SDBResponseError
from boto.compat import six
class SequenceGenerator(object):
"""Generic Sequence Generator object, this takes a single
string as the "sequence" and uses that to figure out
what the next value in a string is. For example
if you give "ABC" and pass in "A" it will give you "B",
and if you give it "C" it will give you "AA".
If you set "rollover" to True in the above example, passing
in "C" would give you "A" again.
The Sequence string can be a string or any iterable
that has the "index" function and is indexable.
"""
__name__ = "SequenceGenerator"
def __init__(self, sequence_string, rollover=False):
"""Create a new SequenceGenerator using the sequence_string
as how to generate the next item.
:param sequence_string: The string or list that explains
how to generate the next item in the sequence
:type sequence_string: str,iterable
:param rollover: Rollover instead of incrementing when
we hit the end of the sequence
:type rollover: bool
"""
self.sequence_string = sequence_string
self.sequence_length = len(sequence_string[0])
self.rollover = rollover
self.last_item = sequence_string[-1]
self.__name__ = "%s('%s')" % (self.__class__.__name__, sequence_string)
def __call__(self, val, last=None):
"""Get the next value in the sequence"""
# If they pass us in a string that's not at least
# the lenght of our sequence, then return the
# first element in our sequence
if val is None or len(val) < self.sequence_length:
return self.sequence_string[0]
last_value = val[-self.sequence_length:]
if (not self.rollover) and (last_value == self.last_item):
val = "%s%s" % (self(val[:-self.sequence_length]), self._inc(last_value))
else:
val = "%s%s" % (val[:-self.sequence_length], self._inc(last_value))
return val
def _inc(self, val):
"""Increment a single value"""
assert(len(val) == self.sequence_length)
return self.sequence_string[(self.sequence_string.index(val) + 1) % len(self.sequence_string)]
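# Worked illustration (added for clarity; mirrors the docstring above):
#
#     >>> gen = SequenceGenerator("ABCDEFGHIJKLMNOPQRSTUVWXYZ")
#     >>> gen("A"), gen("Z"), gen("AZ")
#     ('B', 'AA', 'BA')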
#
# Simple Sequence Functions
#
def increment_by_one(cv=None, lv=None):
if cv is None:
return 0
return cv + 1
def double(cv=None, lv=None):
if cv is None:
return 1
return cv * 2
def fib(cv=1, lv=0):
"""The fibonacci sequence, this incrementer uses the
last value"""
if cv is None:
cv = 1
if lv is None:
lv = 0
return cv + lv
increment_string = SequenceGenerator("ABCDEFGHIJKLMNOPQRSTUVWXYZ")
class Sequence(object):
"""A simple Sequence using the new SDB "Consistent" features
    Based largely on the "Counter" example from Mitch Garnaat:
http://bitbucket.org/mitch/stupidbototricks/src/tip/counter.py"""
def __init__(self, id=None, domain_name=None, fnc=increment_by_one, init_val=None):
"""Create a new Sequence, using an optional function to
increment to the next number, by default we just increment by one.
Every parameter here is optional, if you don't specify any options
        then you'll get a new Sequence with a random ID stored in the
default domain that increments by one and uses the default botoweb
environment
:param id: Optional ID (name) for this counter
:type id: str
:param domain_name: Optional domain name to use, by default we get this out of the
environment configuration
:type domain_name:str
:param fnc: Optional function to use for the incrementation, by default we just increment by one
There are several functions defined in this module.
Your function must accept "None" to get the initial value
:type fnc: function, str
:param init_val: Initial value, by default this is the first element in your sequence,
but you can pass in any value, even a string if you pass in a function that uses
strings instead of ints to increment
"""
self._db = None
self._value = None
self.last_value = None
self.domain_name = domain_name
self.id = id
if init_val is None:
init_val = fnc(init_val)
if self.id is None:
import uuid
self.id = str(uuid.uuid4())
self.item_type = type(fnc(None))
self.timestamp = None
# Allow us to pass in a full name to a function
if isinstance(fnc, six.string_types):
from boto.utils import find_class
fnc = find_class(fnc)
self.fnc = fnc
# Bootstrap the value last
if not self.val:
self.val = init_val
def set(self, val):
"""Set the value"""
import time
now = time.time()
expected_value = []
new_val = {}
new_val['timestamp'] = now
if self._value is not None:
new_val['last_value'] = self._value
expected_value = ['current_value', str(self._value)]
new_val['current_value'] = val
try:
self.db.put_attributes(self.id, new_val, expected_value=expected_value)
self.timestamp = new_val['timestamp']
except SDBResponseError as e:
if e.status == 409:
raise ValueError("Sequence out of sync")
else:
raise
def get(self):
"""Get the value"""
val = self.db.get_attributes(self.id, consistent_read=True)
if val:
if 'timestamp' in val:
self.timestamp = val['timestamp']
if 'current_value' in val:
self._value = self.item_type(val['current_value'])
if "last_value" in val and val['last_value'] is not None:
self.last_value = self.item_type(val['last_value'])
return self._value
val = property(get, set)
def __repr__(self):
return "%s('%s', '%s', '%s.%s', '%s')" % (
self.__class__.__name__,
self.id,
self.domain_name,
self.fnc.__module__, self.fnc.__name__,
self.val)
def _connect(self):
"""Connect to our domain"""
if not self._db:
import boto
sdb = boto.connect_sdb()
if not self.domain_name:
self.domain_name = boto.config.get("DB", "sequence_db", boto.config.get("DB", "db_name", "default"))
try:
self._db = sdb.get_domain(self.domain_name)
except SDBResponseError as e:
if e.status == 400:
self._db = sdb.create_domain(self.domain_name)
else:
raise
return self._db
db = property(_connect)
def next(self):
self.val = self.fnc(self.val, self.last_value)
return self.val
def delete(self):
"""Remove this sequence"""
self.db.delete_attributes(self.id)
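# A minimal usage sketch (hedged: this talks to SimpleDB, so it assumes working
# boto credentials and the domain configuration described in __init__;
# 'my-counter' and 'my-fib' are hypothetical ids):
#
#   s = Sequence(id='my-counter')          # increments by one by default
#   s.next()                               # -> 1
#   s.next()                               # -> 2
#   Sequence(id='my-fib', fnc=fib).next()  # fibonacci-style increments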
| mit |
lakshmi-kannan/st2 | st2common/st2common/models/db/liveaction.py | 6 | 4188 | # Licensed to the StackStorm, Inc ('StackStorm') under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
import mongoengine as me
from st2common import log as logging
from st2common.models.db import MongoDBAccess
from st2common.models.db import stormbase
from st2common.models.db.notification import NotificationSchema
from st2common.fields import ComplexDateTimeField
from st2common.util import date as date_utils
from st2common.util.secrets import get_secret_parameters
from st2common.util.secrets import mask_secret_parameters
__all__ = [
'LiveActionDB',
]
LOG = logging.getLogger(__name__)
PACK_SEPARATOR = '.'
class LiveActionDB(stormbase.StormFoundationDB):
# TODO: Can status be an enum at the Mongo layer?
status = me.StringField(
required=True,
help_text='The current status of the liveaction.')
start_timestamp = ComplexDateTimeField(
default=date_utils.get_datetime_utc_now,
help_text='The timestamp when the liveaction was created.')
end_timestamp = ComplexDateTimeField(
help_text='The timestamp when the liveaction has finished.')
action = me.StringField(
required=True,
help_text='Reference to the action that has to be executed.')
action_is_workflow = me.BooleanField(
default=False,
help_text='A flag indicating whether the referenced action is a workflow.')
parameters = stormbase.EscapedDynamicField(
default={},
        help_text='The key-value pairs passed to the action runner & execution.')
result = stormbase.EscapedDynamicField(
default={},
help_text='Action defined result.')
context = me.DictField(
default={},
help_text='Contextual information on the action execution.')
callback = me.DictField(
default={},
        help_text='Callback information for the completion of the action execution.')
runner_info = me.DictField(
default={},
help_text='Information about the runner which executed this live action (hostname, pid).')
notify = me.EmbeddedDocumentField(NotificationSchema)
meta = {
'indexes': [
{'fields': ['-start_timestamp', 'action']},
{'fields': ['start_timestamp']},
{'fields': ['end_timestamp']},
{'fields': ['action']},
{'fields': ['status']},
]
}
def mask_secrets(self, value):
from st2common.util import action_db
result = copy.deepcopy(value)
execution_parameters = value['parameters']
        # TODO: This results in two DB lookups; we should cache the action and runner type objects
# for each liveaction...
#
# ,-'"-.
# . f .--. \
# .\._,\._',' j_
# 7______""-'__`,
parameters = action_db.get_action_parameters_specs(action_ref=self.action)
secret_parameters = get_secret_parameters(parameters=parameters)
result['parameters'] = mask_secret_parameters(parameters=execution_parameters,
secret_parameters=secret_parameters)
return result
def get_masked_parameters(self):
"""
Retrieve parameters with the secrets masked.
:rtype: ``dict``
"""
serializable_dict = self.to_serializable_dict(mask_secrets=True)
return serializable_dict['parameters']
# specialized access objects
liveaction_access = MongoDBAccess(LiveActionDB)
MODELS = [LiveActionDB]
| apache-2.0 |
pybel/pybel-tools | tests/test_mutation/test_inference.py | 1 | 1121 | # -*- coding: utf-8 -*-
"""Tests for inference functions."""
import unittest
from pybel import BELGraph
from pybel.constants import *
from pybel.dsl import protein
from pybel_tools.mutation.inference import infer_missing_two_way_edges
class TestMutationInference(unittest.TestCase):
def test_infer_missing_two_way_edges(self):
graph = BELGraph()
a = protein('HGNC', 'A')
b = protein('HGNC', 'B')
c = protein('HGNC', 'C')
d = protein('HGNC', 'D')
graph.add_node_from_data(a)
graph.add_node_from_data(b)
graph.add_node_from_data(c)
graph.add_node_from_data(d)
graph.add_edge(a, b, **{RELATION: POSITIVE_CORRELATION})
graph.add_edge(a, c, **{RELATION: POSITIVE_CORRELATION})
graph.add_edge(c, b, **{RELATION: NEGATIVE_CORRELATION})
graph.add_edge(a, d, **{RELATION: INCREASES})
infer_missing_two_way_edges(graph)
self.assertTrue(graph.has_edge(b, a))
self.assertTrue(graph.has_edge(c, a))
self.assertTrue(graph.has_edge(c, b))
self.assertFalse(graph.has_edge(d, a))
| mit |
pombredanne/django-rest-framework-json-api | example/tests/integration/test_meta.py | 2 | 1789 | from datetime import datetime
from django.core.urlresolvers import reverse
import pytest
from example.tests.utils import dump_json, redump_json
pytestmark = pytest.mark.django_db
def test_top_level_meta_for_list_view(blog, client):
expected = {
"data": [{
"type": "blogs",
"id": "1",
"attributes": {
"name": blog.name
},
"links": {
"self": 'http://testserver/blogs/1'
},
"meta": {
"copyright": datetime.now().year
},
}],
'links': {
'first': 'http://testserver/blogs?page=1',
'last': 'http://testserver/blogs?page=1',
'next': None,
'prev': None
},
'meta': {
'pagination': {'count': 1, 'page': 1, 'pages': 1},
'apiDocs': '/docs/api/blogs'
}
}
response = client.get(reverse("blog-list"))
content_dump = redump_json(response.content)
expected_dump = dump_json(expected)
assert content_dump == expected_dump
def test_top_level_meta_for_detail_view(blog, client):
expected = {
"data": {
"type": "blogs",
"id": "1",
"attributes": {
"name": blog.name
},
"links": {
"self": "http://testserver/blogs/1"
},
"meta": {
"copyright": datetime.now().year
},
},
"meta": {
"apiDocs": "/docs/api/blogs"
},
}
response = client.get(reverse("blog-detail", kwargs={'pk': blog.pk}))
content_dump = redump_json(response.content)
expected_dump = dump_json(expected)
assert content_dump == expected_dump
| bsd-2-clause |
lunixbochs/sublimelint | lint/edit.py | 1 | 3614 | # edit.py
# buffer editing for both ST2 and ST3 that "just works"
import inspect
import sublime
import sublime_plugin
try:
sublime.sublimelint_edit_storage
except AttributeError:
sublime.sublimelint_edit_storage = {}
def run_callback(func, *args, **kwargs):
    # getfullargspec exists only on Python 3 (ST3); fall back for ST2's Python 2
    try:
        spec = inspect.getfullargspec(func)
    except AttributeError:
        spec = inspect.getargspec(func)
if spec.args or spec.varargs:
return func(*args, **kwargs)
else:
return func()
class EditFuture:
def __init__(self, func):
self.func = func
def resolve(self, view, edit):
return self.func(view, edit)
class EditStep:
def __init__(self, cmd, *args):
self.cmd = cmd
self.args = args
def run(self, view, edit):
if self.cmd == 'callback':
return run_callback(self.args[0], view, edit)
def insert(edit, pos, text):
pos = min(view.size(), pos)
view.insert(edit, pos, text)
funcs = {
'insert': insert,
'erase': view.erase,
'replace': view.replace,
}
func = funcs.get(self.cmd)
if func:
args = self.resolve_args(view, edit)
func(edit, *args)
def resolve_args(self, view, edit):
args = []
for arg in self.args:
if isinstance(arg, EditFuture):
arg = arg.resolve(view, edit)
args.append(arg)
return args
class Edit:
def __init__(self, view):
self.view = view
self.steps = []
def __nonzero__(self):
return bool(self.steps)
@classmethod
def future(cls, func):
return EditFuture(func)
@classmethod
def defer(cls, view, func):
with Edit(view) as edit:
edit.callback(func)
def step(self, cmd, *args):
step = EditStep(cmd, *args)
self.steps.append(step)
def insert(self, point, string):
self.step('insert', point, string)
def erase(self, region):
self.step('erase', region)
def replace(self, region, string):
self.step('replace', region, string)
def callback(self, func):
self.step('callback', func)
def reselect(self, pos):
def select(view, edit):
region = pos
if hasattr(pos, '__call__'):
region = run_callback(pos, view)
if isinstance(region, int):
region = sublime.Region(region, region)
elif isinstance(region, (tuple, list)):
region = sublime.Region(*region)
view.sel().clear()
view.sel().add(region)
view.show(region, False)
self.callback(select)
def append(self, text):
self.insert(self.view.size(), text)
def run(self, view, edit):
read_only = False
if view.is_read_only():
read_only = True
view.set_read_only(False)
for step in self.steps:
step.run(view, edit)
if read_only:
view.set_read_only(True)
def __enter__(self):
return self
def __exit__(self, type, value, traceback):
view = self.view
if sublime.version().startswith('2'):
edit = view.begin_edit()
            self.run(view, edit)
view.end_edit(edit)
else:
key = str(hash(tuple(self.steps)))
sublime.sublimelint_edit_storage[key] = self.run
view.run_command('apply_sublimelint_edit', {'key': key})
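# A minimal usage sketch (the view object and positions are hypothetical; the
# queued steps are applied when the 'with' block exits, on both ST2 and ST3):
#
#   with Edit(view) as edit:
#       edit.insert(0, 'hello\n')
#       edit.append('world\n')
#       edit.reselect(0)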
class apply_sublimelint_edit(sublime_plugin.TextCommand):
def run(self, edit, key):
sublime.sublimelint_edit_storage.pop(key)(self.view, edit)
| mit |
tobinjt/Flexget | flexget/components/notify/notifiers/email.py | 4 | 7177 | from __future__ import unicode_literals, division, absolute_import
from builtins import * # noqa pylint: disable=unused-import, redefined-builtin
from future.utils import text_to_native_str
import logging
import smtplib
import socket
import getpass
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
from email.utils import formatdate
from smtplib import SMTPAuthenticationError, SMTPServerDisconnected, SMTPSenderRefused
from flexget import plugin
from flexget.config_schema import one_or_more
from flexget.event import event
from flexget.plugin import PluginWarning
plugin_name = 'email'
log = logging.getLogger(plugin_name)
class EmailNotifier(object):
"""
Send an e-mail with the list of all succeeded (downloaded) entries.
Configuration options
=============== ===================================================================
Option Description
=============== ===================================================================
from The email address from which the email will be sent (required)
to The email address of the recipient (required)
smtp_host The host of the smtp server
smtp_port The port of the smtp server
smtp_username The username to use to connect to the smtp server
smtp_password The password to use to connect to the smtp server
smtp_tls Should we use TLS to connect to the smtp server
smtp_ssl Should we use SSL to connect to the smtp server
=============== ===================================================================
Config basic example::
notify:
entries:
via:
- email:
from: [email protected]
to: [email protected]
smtp_host: smtp.host.com
Config example with smtp login::
notify:
entries:
via:
- email:
from: [email protected]
to: [email protected]
smtp_host: smtp.host.com
smtp_port: 25
smtp_login: true
smtp_username: my_smtp_login
smtp_password: my_smtp_password
smtp_tls: true
GMAIL example::
notify:
entries:
via:
- email:
from: [email protected]
to: [email protected]
smtp_host: smtp.gmail.com
smtp_port: 587
smtp_login: true
smtp_username: gmailUser
smtp_password: gmailPassword
smtp_tls: true
Default values for the config elements::
notify:
entries:
via:
- email:
smtp_host: localhost
smtp_port: 25
smtp_login: False
smtp_username:
smtp_password:
smtp_tls: False
smtp_ssl: False
"""
def __init__(self):
self.mail_server = None
self.host = None
self.port = None
self.username = None
self.password = None
self.ssl = None
self.tls = None
def connect_to_smtp_server(self, config):
self.host = config['smtp_host']
self.port = config['smtp_port']
self.ssl = config['smtp_ssl']
self.tls = config['smtp_tls']
self.username = config.get('smtp_username')
self.password = config.get('smtp_password')
try:
log.debug('connecting to smtp server %s:%s', self.host, self.port)
self.mail_server = smtplib.SMTP_SSL if self.ssl else smtplib.SMTP
self.mail_server = self.mail_server(self.host, self.port)
if self.tls:
self.mail_server.ehlo()
self.mail_server.starttls()
self.mail_server.ehlo()
except (socket.error, OSError) as e:
raise PluginWarning(str(e))
try:
if self.username:
# Forcing to use `str` type
log.debug('logging in to smtp server using username: %s', self.username)
self.mail_server.login(
text_to_native_str(self.username), text_to_native_str(self.password)
)
except (IOError, SMTPAuthenticationError) as e:
raise PluginWarning(str(e))
schema = {
'type': 'object',
'properties': {
'to': one_or_more({'type': 'string', 'format': 'email'}),
'from': {
'type': 'string',
'default': '[email protected]',
'format': 'email',
},
'autofrom': {'type': 'boolean', 'default': False},
'smtp_host': {'type': 'string', 'default': 'localhost'},
'smtp_port': {'type': 'integer', 'default': 25},
'smtp_username': {'type': 'string'},
'smtp_password': {'type': 'string'},
'smtp_tls': {'type': 'boolean', 'default': False},
'smtp_ssl': {'type': 'boolean', 'default': False},
'html': {'type': 'boolean', 'default': False},
},
'required': ['to'],
'dependencies': {
'smtp_username': ['smtp_password'],
'smtp_password': ['smtp_username'],
'smtp_ssl': ['smtp_tls'],
},
'additionalProperties': False,
}
def notify(self, title, message, config):
"""
Send an email notification
:param str message: message body
:param str title: message subject
:param dict config: email plugin config
"""
if not isinstance(config['to'], list):
config['to'] = [config['to']]
email = MIMEMultipart('alternative')
email['To'] = ','.join(config['to'])
email['From'] = (
getpass.getuser() + '@' + socket.getfqdn() if config['autofrom'] else config['from']
)
email['Subject'] = title
email['Date'] = formatdate(localtime=True)
content_type = 'html' if config['html'] else 'plain'
email.attach(MIMEText(message.encode('utf-8'), content_type, _charset='utf-8'))
        # Make sure the mail server connection remains open per host or username
# (in case several mail servers are used in the same task)
if not self.mail_server or not (
self.host == config['smtp_host'] and self.username == config.get('smtp_username')
):
self.connect_to_smtp_server(config)
connection_error = None
while True:
try:
self.mail_server.sendmail(email['From'], config['to'], email.as_string())
break
except (SMTPServerDisconnected, SMTPSenderRefused) as e:
if not connection_error:
self.connect_to_smtp_server(config)
connection_error = e
else:
raise PluginWarning('Could not connect to SMTP server: %s' % str(e))
@event('plugin.register')
def register_plugin():
plugin.register(EmailNotifier, plugin_name, api_ver=2, interfaces=['notifiers'])
| mit |
belokop/indico_bare | indico/MaKaC/common/Locators.py | 2 | 1943 | # This file is part of Indico.
# Copyright (C) 2002 - 2016 European Organization for Nuclear Research (CERN).
#
# Indico is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 3 of the
# License, or (at your option) any later version.
#
# Indico is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Indico; if not, see <http://www.gnu.org/licenses/>.
from markupsafe import escape
# XXX: do not use this in new code! use a plain dict instead!
class Locator(dict):
"""Helper class specialising UserDict (dictionary) which contains a locator
for an object. This is needed due to the id schema chosen and to the
web needs: it is a relative id schema (a suboject is given an id which
is unique only inside its superobject) and we need to uniquely identify
some objects on the web pages so it is needed to handle the "locator"
which can be made up of various ids. This class will contain the locator
and provide methods for using it on the web pages so it's use is
transparent for client.
"""
def getWebForm(self):
"""Returns the current locator for being used in web pages forms
(hidden parameters)
"""
l = []
for item, val in self.iteritems():
if isinstance(val, list):
for v in val:
l.append('<input type="hidden" name="{}" value="{}">'.format(item, escape(v)))
else:
l.append('<input type="hidden" name="{}" value="{}">'.format(item, escape(val)))
return "\n".join(l)
| gpl-3.0 |
QuLogic/vispy | examples/tutorial/gloo/colored_quad.py | 18 | 1648 | # -*- coding: utf-8 -*-
# -----------------------------------------------------------------------------
# Copyright (c) 2015, Vispy Development Team. All Rights Reserved.
# Distributed under the (new) BSD License. See LICENSE.txt for more info.
# -----------------------------------------------------------------------------
# Author: Nicolas P. Rougier
# Date: 04/03/2014
# -----------------------------------------------------------------------------
from vispy import app, gloo
from vispy.gloo import Program
vertex = """
attribute vec4 color;
attribute vec2 position;
varying vec4 v_color;
void main()
{
gl_Position = vec4(position, 0.0, 1.0);
v_color = color;
} """
fragment = """
varying vec4 v_color;
void main()
{
gl_FragColor = v_color;
} """
class Canvas(app.Canvas):
def __init__(self):
app.Canvas.__init__(self, size=(512, 512), title='Colored quad',
keys='interactive')
# Build program & data
self.program = Program(vertex, fragment, count=4)
self.program['color'] = [(1, 0, 0, 1), (0, 1, 0, 1),
(0, 0, 1, 1), (1, 1, 0, 1)]
self.program['position'] = [(-1, -1), (-1, +1),
(+1, -1), (+1, +1)]
gloo.set_viewport(0, 0, *self.physical_size)
self.show()
def on_draw(self, event):
gloo.clear(color='white')
self.program.draw('triangle_strip')
def on_resize(self, event):
gloo.set_viewport(0, 0, *event.physical_size)
if __name__ == '__main__':
c = Canvas()
app.run()
| bsd-3-clause |
cartersgenes/namebench | nb_third_party/httplib2/__init__.py | 451 | 51082 | from __future__ import generators
"""
httplib2
A caching http interface that supports ETags and gzip
to conserve bandwidth.
Requires Python 2.3 or later
Changelog:
2007-08-18, Rick: Modified so it's able to use a socks proxy if needed.
"""
__author__ = "Joe Gregorio ([email protected])"
__copyright__ = "Copyright 2006, Joe Gregorio"
__contributors__ = ["Thomas Broyer ([email protected])",
"James Antill",
"Xavier Verges Farrero",
"Jonathan Feinberg",
"Blair Zajac",
"Sam Ruby",
"Louis Nyffenegger"]
__license__ = "MIT"
__version__ = "$Rev$"
import re
import sys
import email
import email.Utils
import email.Message
import email.FeedParser
import StringIO
import gzip
import zlib
import httplib
import urlparse
import base64
import os
import copy
import calendar
import time
import random
# remove deprecated warning in python2.6
try:
from hashlib import sha1 as _sha, md5 as _md5
except ImportError:
import sha
import md5
_sha = sha.new
_md5 = md5.new
import hmac
from gettext import gettext as _
import socket
try:
import socks
except ImportError:
socks = None
# Build the appropriate socket wrapper for ssl
try:
import ssl # python 2.6
_ssl_wrap_socket = ssl.wrap_socket
except ImportError:
def _ssl_wrap_socket(sock, key_file, cert_file):
ssl_sock = socket.ssl(sock, key_file, cert_file)
return httplib.FakeSocket(sock, ssl_sock)
if sys.version_info >= (2,3):
from iri2uri import iri2uri
else:
def iri2uri(uri):
return uri
def has_timeout(timeout): # python 2.6
if hasattr(socket, '_GLOBAL_DEFAULT_TIMEOUT'):
return (timeout is not None and timeout is not socket._GLOBAL_DEFAULT_TIMEOUT)
return (timeout is not None)
__all__ = ['Http', 'Response', 'ProxyInfo', 'HttpLib2Error',
'RedirectMissingLocation', 'RedirectLimit', 'FailedToDecompressContent',
'UnimplementedDigestAuthOptionError', 'UnimplementedHmacDigestAuthOptionError',
'debuglevel']
# The httplib debug level, set to a non-zero value to get debug output
debuglevel = 0
# Python 2.3 support
if sys.version_info < (2,4):
def sorted(seq):
seq.sort()
return seq
# Python 2.3 support
def HTTPResponse__getheaders(self):
"""Return list of (header, value) tuples."""
if self.msg is None:
raise httplib.ResponseNotReady()
return self.msg.items()
if not hasattr(httplib.HTTPResponse, 'getheaders'):
httplib.HTTPResponse.getheaders = HTTPResponse__getheaders
# All exceptions raised here derive from HttpLib2Error
class HttpLib2Error(Exception): pass
# Some exceptions can be caught and optionally
# be turned back into responses.
class HttpLib2ErrorWithResponse(HttpLib2Error):
def __init__(self, desc, response, content):
self.response = response
self.content = content
HttpLib2Error.__init__(self, desc)
class RedirectMissingLocation(HttpLib2ErrorWithResponse): pass
class RedirectLimit(HttpLib2ErrorWithResponse): pass
class FailedToDecompressContent(HttpLib2ErrorWithResponse): pass
class UnimplementedDigestAuthOptionError(HttpLib2ErrorWithResponse): pass
class UnimplementedHmacDigestAuthOptionError(HttpLib2ErrorWithResponse): pass
class RelativeURIError(HttpLib2Error): pass
class ServerNotFoundError(HttpLib2Error): pass
# Open Items:
# -----------
# Proxy support
# Are we removing the cached content too soon on PUT? (Only delete on 200, maybe?)
# Pluggable cache storage (supports storing the cache in
# flat files by default. We need a plug-in architecture
# that can support Berkeley DB and Squid)
# == Known Issues ==
# Does not handle a resource that uses conneg and Last-Modified but no ETag as a cache validator.
# Does not handle Cache-Control: max-stale
# Does not use Age: headers when calculating cache freshness.
# The number of redirections to follow before giving up.
# Note that only GET redirects are automatically followed.
# Will also honor 301 requests by saving that info and never
# requesting that URI again.
DEFAULT_MAX_REDIRECTS = 5
# Which headers are hop-by-hop headers by default
HOP_BY_HOP = ['connection', 'keep-alive', 'proxy-authenticate', 'proxy-authorization', 'te', 'trailers', 'transfer-encoding', 'upgrade']
def _get_end2end_headers(response):
hopbyhop = list(HOP_BY_HOP)
hopbyhop.extend([x.strip() for x in response.get('connection', '').split(',')])
return [header for header in response.keys() if header not in hopbyhop]
URI = re.compile(r"^(([^:/?#]+):)?(//([^/?#]*))?([^?#]*)(\?([^#]*))?(#(.*))?")
def parse_uri(uri):
"""Parses a URI using the regex given in Appendix B of RFC 3986.
(scheme, authority, path, query, fragment) = parse_uri(uri)
"""
groups = URI.match(uri).groups()
return (groups[1], groups[3], groups[4], groups[6], groups[8])
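# An illustrative example (hypothetical URI):
#   parse_uri("http://example.org/path?q=1#frag")
#   # -> ('http', 'example.org', '/path', 'q=1', 'frag')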
def urlnorm(uri):
(scheme, authority, path, query, fragment) = parse_uri(uri)
if not scheme or not authority:
raise RelativeURIError("Only absolute URIs are allowed. uri = %s" % uri)
authority = authority.lower()
scheme = scheme.lower()
if not path:
path = "/"
# Could do syntax based normalization of the URI before
# computing the digest. See Section 6.2.2 of Std 66.
request_uri = query and "?".join([path, query]) or path
scheme = scheme.lower()
defrag_uri = scheme + "://" + authority + request_uri
return scheme, authority, request_uri, defrag_uri
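# An illustrative example (hypothetical URI): only the scheme and authority are
# lowercased, and the fragment is dropped from the defragmented URI:
#   urlnorm("HTTP://Example.ORG/Path?q=1#frag")
#   # -> ('http', 'example.org', '/Path?q=1', 'http://example.org/Path?q=1')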
# Cache filename construction (original borrowed from Venus http://intertwingly.net/code/venus/)
re_url_scheme = re.compile(r'^\w+://')
re_slash = re.compile(r'[?/:|]+')
def safename(filename):
"""Return a filename suitable for the cache.
Strips dangerous and common characters to create a filename we
can use to store the cache in.
"""
try:
if re_url_scheme.match(filename):
if isinstance(filename,str):
filename = filename.decode('utf-8')
filename = filename.encode('idna')
else:
filename = filename.encode('idna')
except UnicodeError:
pass
if isinstance(filename,unicode):
filename=filename.encode('utf-8')
filemd5 = _md5(filename).hexdigest()
filename = re_url_scheme.sub("", filename)
filename = re_slash.sub(",", filename)
# limit length of filename
if len(filename)>200:
filename=filename[:200]
return ",".join((filename, filemd5))
NORMALIZE_SPACE = re.compile(r'(?:\r\n)?[ \t]+')
def _normalize_headers(headers):
    return dict([ (key.lower(), NORMALIZE_SPACE.sub(' ', value).strip()) for (key, value) in headers.iteritems()])
def _parse_cache_control(headers):
retval = {}
if headers.has_key('cache-control'):
parts = headers['cache-control'].split(',')
parts_with_args = [tuple([x.strip().lower() for x in part.split("=", 1)]) for part in parts if -1 != part.find("=")]
parts_wo_args = [(name.strip().lower(), 1) for name in parts if -1 == name.find("=")]
retval = dict(parts_with_args + parts_wo_args)
return retval
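# An illustrative example (hypothetical header): a headers dict carrying
#   {'cache-control': 'max-age=3600, no-cache'}
# parses to {'max-age': '3600', 'no-cache': 1} -- valueless directives map to 1.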
# Whether to use a strict mode to parse WWW-Authenticate headers
# Might lead to bad results in case of ill-formed header value,
# so disabled by default, falling back to relaxed parsing.
# Set to true to turn on, useful for testing servers.
USE_WWW_AUTH_STRICT_PARSING = 0
# In regex below:
# [^\0-\x1f\x7f-\xff()<>@,;:\\\"/[\]?={} \t]+ matches a "token" as defined by HTTP
# "(?:[^\0-\x08\x0A-\x1f\x7f-\xff\\\"]|\\[\0-\x7f])*?" matches a "quoted-string" as defined by HTTP, when LWS have already been replaced by a single space
# Actually, as an auth-param value can be either a token or a quoted-string, they are combined in a single pattern which matches both:
# \"?((?<=\")(?:[^\0-\x1f\x7f-\xff\\\"]|\\[\0-\x7f])*?(?=\")|(?<!\")[^\0-\x08\x0A-\x1f\x7f-\xff()<>@,;:\\\"/[\]?={} \t]+(?!\"))\"?
WWW_AUTH_STRICT = re.compile(r"^(?:\s*(?:,\s*)?([^\0-\x1f\x7f-\xff()<>@,;:\\\"/[\]?={} \t]+)\s*=\s*\"?((?<=\")(?:[^\0-\x08\x0A-\x1f\x7f-\xff\\\"]|\\[\0-\x7f])*?(?=\")|(?<!\")[^\0-\x1f\x7f-\xff()<>@,;:\\\"/[\]?={} \t]+(?!\"))\"?)(.*)$")
WWW_AUTH_RELAXED = re.compile(r"^(?:\s*(?:,\s*)?([^ \t\r\n=]+)\s*=\s*\"?((?<=\")(?:[^\\\"]|\\.)*?(?=\")|(?<!\")[^ \t\r\n,]+(?!\"))\"?)(.*)$")
UNQUOTE_PAIRS = re.compile(r'\\(.)')
def _parse_www_authenticate(headers, headername='www-authenticate'):
"""Returns a dictionary of dictionaries, one dict
per auth_scheme."""
retval = {}
if headers.has_key(headername):
authenticate = headers[headername].strip()
www_auth = USE_WWW_AUTH_STRICT_PARSING and WWW_AUTH_STRICT or WWW_AUTH_RELAXED
while authenticate:
# Break off the scheme at the beginning of the line
if headername == 'authentication-info':
(auth_scheme, the_rest) = ('digest', authenticate)
else:
(auth_scheme, the_rest) = authenticate.split(" ", 1)
# Now loop over all the key value pairs that come after the scheme,
# being careful not to roll into the next scheme
match = www_auth.search(the_rest)
auth_params = {}
while match:
if match and len(match.groups()) == 3:
(key, value, the_rest) = match.groups()
auth_params[key.lower()] = UNQUOTE_PAIRS.sub(r'\1', value) # '\\'.join([x.replace('\\', '') for x in value.split('\\\\')])
match = www_auth.search(the_rest)
retval[auth_scheme.lower()] = auth_params
authenticate = the_rest.strip()
return retval
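# An illustrative example (hypothetical challenge):
#   _parse_www_authenticate({'www-authenticate': 'Basic realm="test"'})
#   # -> {'basic': {'realm': 'test'}}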
def _entry_disposition(response_headers, request_headers):
"""Determine freshness from the Date, Expires and Cache-Control headers.
We don't handle the following:
1. Cache-Control: max-stale
2. Age: headers are not used in the calculations.
    Note that this algorithm is simpler than you might think
because we are operating as a private (non-shared) cache.
This lets us ignore 's-maxage'. We can also ignore
'proxy-invalidate' since we aren't a proxy.
    As a design decision we will never return a stale
    document as fresh, hence the non-implementation
    of 'max-stale'. This also lets us safely ignore 'must-revalidate'
since we operate as if every server has sent 'must-revalidate'.
Since we are private we get to ignore both 'public' and
'private' parameters. We also ignore 'no-transform' since
we don't do any transformations.
The 'no-store' parameter is handled at a higher level.
So the only Cache-Control parameters we look at are:
no-cache
only-if-cached
max-age
min-fresh
"""
retval = "STALE"
cc = _parse_cache_control(request_headers)
cc_response = _parse_cache_control(response_headers)
if request_headers.has_key('pragma') and request_headers['pragma'].lower().find('no-cache') != -1:
retval = "TRANSPARENT"
if 'cache-control' not in request_headers:
request_headers['cache-control'] = 'no-cache'
elif cc.has_key('no-cache'):
retval = "TRANSPARENT"
elif cc_response.has_key('no-cache'):
retval = "STALE"
elif cc.has_key('only-if-cached'):
retval = "FRESH"
elif response_headers.has_key('date'):
date = calendar.timegm(email.Utils.parsedate_tz(response_headers['date']))
now = time.time()
current_age = max(0, now - date)
if cc_response.has_key('max-age'):
try:
freshness_lifetime = int(cc_response['max-age'])
except ValueError:
freshness_lifetime = 0
elif response_headers.has_key('expires'):
expires = email.Utils.parsedate_tz(response_headers['expires'])
if None == expires:
freshness_lifetime = 0
else:
freshness_lifetime = max(0, calendar.timegm(expires) - date)
else:
freshness_lifetime = 0
if cc.has_key('max-age'):
try:
freshness_lifetime = int(cc['max-age'])
except ValueError:
freshness_lifetime = 0
if cc.has_key('min-fresh'):
try:
min_fresh = int(cc['min-fresh'])
except ValueError:
min_fresh = 0
current_age += min_fresh
if freshness_lifetime > current_age:
retval = "FRESH"
return retval
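# A worked example (illustrative numbers): a response whose Date header is 10
# seconds old and which carries 'Cache-Control: max-age=60' yields
# current_age == 10 and freshness_lifetime == 60, so the entry is "FRESH";
# once current_age reaches 60 the same entry is reported "STALE".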
def _decompressContent(response, new_content):
content = new_content
try:
encoding = response.get('content-encoding', None)
if encoding in ['gzip', 'deflate']:
if encoding == 'gzip':
content = gzip.GzipFile(fileobj=StringIO.StringIO(new_content)).read()
if encoding == 'deflate':
content = zlib.decompress(content)
response['content-length'] = str(len(content))
            # Record the historical presence of the encoding in a way that won't interfere.
response['-content-encoding'] = response['content-encoding']
del response['content-encoding']
except IOError:
content = ""
raise FailedToDecompressContent(_("Content purported to be compressed with %s but failed to decompress.") % response.get('content-encoding'), response, content)
return content
def _updateCache(request_headers, response_headers, content, cache, cachekey):
if cachekey:
cc = _parse_cache_control(request_headers)
cc_response = _parse_cache_control(response_headers)
if cc.has_key('no-store') or cc_response.has_key('no-store'):
cache.delete(cachekey)
else:
info = email.Message.Message()
for key, value in response_headers.iteritems():
if key not in ['status','content-encoding','transfer-encoding']:
info[key] = value
# Add annotations to the cache to indicate what headers
# are variant for this request.
vary = response_headers.get('vary', None)
if vary:
vary_headers = vary.lower().replace(' ', '').split(',')
for header in vary_headers:
key = '-varied-%s' % header
try:
info[key] = request_headers[header]
except KeyError:
pass
status = response_headers.status
if status == 304:
status = 200
            status_header = 'status: %d\r\n' % status
header_str = info.as_string()
header_str = re.sub("\r(?!\n)|(?<!\r)\n", "\r\n", header_str)
text = "".join([status_header, header_str, content])
cache.set(cachekey, text)
def _cnonce():
dig = _md5("%s:%s" % (time.ctime(), ["0123456789"[random.randrange(0, 9)] for i in range(20)])).hexdigest()
return dig[:16]
def _wsse_username_token(cnonce, iso_now, password):
return base64.b64encode(_sha("%s%s%s" % (cnonce, iso_now, password)).digest()).strip()
# For credentials we need two things, first
# a pool of credentials to try (not necessarily tied to Basic, Digest, etc.)
# Then we also need a list of URIs that have already demanded authentication
# That list is tricky since sub-URIs can take the same auth, or the
# auth scheme may change as you descend the tree.
# So we also need each Auth instance to be able to tell us
# how close to the 'top' it is.
class Authentication(object):
def __init__(self, credentials, host, request_uri, headers, response, content, http):
(scheme, authority, path, query, fragment) = parse_uri(request_uri)
self.path = path
self.host = host
self.credentials = credentials
self.http = http
def depth(self, request_uri):
(scheme, authority, path, query, fragment) = parse_uri(request_uri)
return request_uri[len(self.path):].count("/")
def inscope(self, host, request_uri):
# XXX Should we normalize the request_uri?
(scheme, authority, path, query, fragment) = parse_uri(request_uri)
return (host == self.host) and path.startswith(self.path)
def request(self, method, request_uri, headers, content):
"""Modify the request headers to add the appropriate
        Authorization header. Override this in sub-classes."""
pass
def response(self, response, content):
"""Gives us a chance to update with new nonces
or such returned from the last authorized response.
        Override this in sub-classes if necessary.
        Return TRUE if the request is to be retried, for
example Digest may return stale=true.
"""
return False
class BasicAuthentication(Authentication):
def __init__(self, credentials, host, request_uri, headers, response, content, http):
Authentication.__init__(self, credentials, host, request_uri, headers, response, content, http)
def request(self, method, request_uri, headers, content):
"""Modify the request headers to add the appropriate
Authorization header."""
headers['authorization'] = 'Basic ' + base64.b64encode("%s:%s" % self.credentials).strip()
class DigestAuthentication(Authentication):
"""Only do qop='auth' and MD5, since that
is all Apache currently implements"""
def __init__(self, credentials, host, request_uri, headers, response, content, http):
Authentication.__init__(self, credentials, host, request_uri, headers, response, content, http)
challenge = _parse_www_authenticate(response, 'www-authenticate')
self.challenge = challenge['digest']
qop = self.challenge.get('qop', 'auth')
self.challenge['qop'] = ('auth' in [x.strip() for x in qop.split()]) and 'auth' or None
if self.challenge['qop'] is None:
raise UnimplementedDigestAuthOptionError( _("Unsupported value for qop: %s." % qop))
self.challenge['algorithm'] = self.challenge.get('algorithm', 'MD5').upper()
if self.challenge['algorithm'] != 'MD5':
raise UnimplementedDigestAuthOptionError( _("Unsupported value for algorithm: %s." % self.challenge['algorithm']))
self.A1 = "".join([self.credentials[0], ":", self.challenge['realm'], ":", self.credentials[1]])
self.challenge['nc'] = 1
def request(self, method, request_uri, headers, content, cnonce = None):
"""Modify the request headers"""
H = lambda x: _md5(x).hexdigest()
KD = lambda s, d: H("%s:%s" % (s, d))
A2 = "".join([method, ":", request_uri])
self.challenge['cnonce'] = cnonce or _cnonce()
request_digest = '"%s"' % KD(H(self.A1), "%s:%s:%s:%s:%s" % (self.challenge['nonce'],
'%08x' % self.challenge['nc'],
self.challenge['cnonce'],
self.challenge['qop'], H(A2)
))
headers['Authorization'] = 'Digest username="%s", realm="%s", nonce="%s", uri="%s", algorithm=%s, response=%s, qop=%s, nc=%08x, cnonce="%s"' % (
self.credentials[0],
self.challenge['realm'],
self.challenge['nonce'],
request_uri,
self.challenge['algorithm'],
request_digest,
self.challenge['qop'],
self.challenge['nc'],
self.challenge['cnonce'],
)
self.challenge['nc'] += 1
def response(self, response, content):
if not response.has_key('authentication-info'):
challenge = _parse_www_authenticate(response, 'www-authenticate').get('digest', {})
if 'true' == challenge.get('stale'):
self.challenge['nonce'] = challenge['nonce']
self.challenge['nc'] = 1
return True
else:
updated_challenge = _parse_www_authenticate(response, 'authentication-info').get('digest', {})
if updated_challenge.has_key('nextnonce'):
self.challenge['nonce'] = updated_challenge['nextnonce']
self.challenge['nc'] = 1
return False
class HmacDigestAuthentication(Authentication):
"""Adapted from Robert Sayre's code and DigestAuthentication above."""
__author__ = "Thomas Broyer ([email protected])"
def __init__(self, credentials, host, request_uri, headers, response, content, http):
Authentication.__init__(self, credentials, host, request_uri, headers, response, content, http)
challenge = _parse_www_authenticate(response, 'www-authenticate')
self.challenge = challenge['hmacdigest']
# TODO: self.challenge['domain']
self.challenge['reason'] = self.challenge.get('reason', 'unauthorized')
if self.challenge['reason'] not in ['unauthorized', 'integrity']:
self.challenge['reason'] = 'unauthorized'
self.challenge['salt'] = self.challenge.get('salt', '')
if not self.challenge.get('snonce'):
raise UnimplementedHmacDigestAuthOptionError( _("The challenge doesn't contain a server nonce, or this one is empty."))
self.challenge['algorithm'] = self.challenge.get('algorithm', 'HMAC-SHA-1')
if self.challenge['algorithm'] not in ['HMAC-SHA-1', 'HMAC-MD5']:
raise UnimplementedHmacDigestAuthOptionError( _("Unsupported value for algorithm: %s." % self.challenge['algorithm']))
self.challenge['pw-algorithm'] = self.challenge.get('pw-algorithm', 'SHA-1')
if self.challenge['pw-algorithm'] not in ['SHA-1', 'MD5']:
raise UnimplementedHmacDigestAuthOptionError( _("Unsupported value for pw-algorithm: %s." % self.challenge['pw-algorithm']))
if self.challenge['algorithm'] == 'HMAC-MD5':
self.hashmod = _md5
else:
self.hashmod = _sha
if self.challenge['pw-algorithm'] == 'MD5':
self.pwhashmod = _md5
else:
self.pwhashmod = _sha
self.key = "".join([self.credentials[0], ":",
self.pwhashmod.new("".join([self.credentials[1], self.challenge['salt']])).hexdigest().lower(),
":", self.challenge['realm']
])
self.key = self.pwhashmod.new(self.key).hexdigest().lower()
def request(self, method, request_uri, headers, content):
"""Modify the request headers"""
keys = _get_end2end_headers(headers)
keylist = "".join(["%s " % k for k in keys])
headers_val = "".join([headers[k] for k in keys])
created = time.strftime('%Y-%m-%dT%H:%M:%SZ',time.gmtime())
cnonce = _cnonce()
request_digest = "%s:%s:%s:%s:%s" % (method, request_uri, cnonce, self.challenge['snonce'], headers_val)
request_digest = hmac.new(self.key, request_digest, self.hashmod).hexdigest().lower()
headers['Authorization'] = 'HMACDigest username="%s", realm="%s", snonce="%s", cnonce="%s", uri="%s", created="%s", response="%s", headers="%s"' % (
self.credentials[0],
self.challenge['realm'],
self.challenge['snonce'],
cnonce,
request_uri,
created,
request_digest,
keylist,
)
def response(self, response, content):
challenge = _parse_www_authenticate(response, 'www-authenticate').get('hmacdigest', {})
if challenge.get('reason') in ['integrity', 'stale']:
return True
return False
class WsseAuthentication(Authentication):
"""This is thinly tested and should not be relied upon.
At this time there isn't any third party server to test against.
Blogger and TypePad implemented this algorithm at one point
but Blogger has since switched to Basic over HTTPS and
TypePad has implemented it wrong, by never issuing a 401
challenge but instead requiring your client to telepathically know that
their endpoint is expecting WSSE profile="UsernameToken"."""
def __init__(self, credentials, host, request_uri, headers, response, content, http):
Authentication.__init__(self, credentials, host, request_uri, headers, response, content, http)
def request(self, method, request_uri, headers, content):
"""Modify the request headers to add the appropriate
Authorization header."""
headers['Authorization'] = 'WSSE profile="UsernameToken"'
iso_now = time.strftime("%Y-%m-%dT%H:%M:%SZ", time.gmtime())
cnonce = _cnonce()
password_digest = _wsse_username_token(cnonce, iso_now, self.credentials[1])
headers['X-WSSE'] = 'UsernameToken Username="%s", PasswordDigest="%s", Nonce="%s", Created="%s"' % (
self.credentials[0],
password_digest,
cnonce,
iso_now)
class GoogleLoginAuthentication(Authentication):
def __init__(self, credentials, host, request_uri, headers, response, content, http):
from urllib import urlencode
Authentication.__init__(self, credentials, host, request_uri, headers, response, content, http)
challenge = _parse_www_authenticate(response, 'www-authenticate')
service = challenge['googlelogin'].get('service', 'xapi')
        # Blogger actually returns the service in the challenge
# For the rest we guess based on the URI
if service == 'xapi' and request_uri.find("calendar") > 0:
service = "cl"
# No point in guessing Base or Spreadsheet
#elif request_uri.find("spreadsheets") > 0:
# service = "wise"
auth = dict(Email=credentials[0], Passwd=credentials[1], service=service, source=headers['user-agent'])
resp, content = self.http.request("https://www.google.com/accounts/ClientLogin", method="POST", body=urlencode(auth), headers={'Content-Type': 'application/x-www-form-urlencoded'})
lines = content.split('\n')
d = dict([tuple(line.split("=", 1)) for line in lines if line])
if resp.status == 403:
self.Auth = ""
else:
self.Auth = d['Auth']
def request(self, method, request_uri, headers, content):
"""Modify the request headers to add the appropriate
Authorization header."""
headers['authorization'] = 'GoogleLogin Auth=' + self.Auth
AUTH_SCHEME_CLASSES = {
"basic": BasicAuthentication,
"wsse": WsseAuthentication,
"digest": DigestAuthentication,
"hmacdigest": HmacDigestAuthentication,
"googlelogin": GoogleLoginAuthentication
}
AUTH_SCHEME_ORDER = ["hmacdigest", "googlelogin", "digest", "wsse", "basic"]
class FileCache(object):
"""Uses a local directory as a store for cached files.
Not really safe to use if multiple threads or processes are going to
be running on the same cache.
"""
def __init__(self, cache, safe=safename): # use safe=lambda x: md5.new(x).hexdigest() for the old behavior
self.cache = cache
self.safe = safe
if not os.path.exists(cache):
os.makedirs(self.cache)
def get(self, key):
retval = None
cacheFullPath = os.path.join(self.cache, self.safe(key))
try:
f = file(cacheFullPath, "rb")
retval = f.read()
f.close()
except IOError:
pass
return retval
def set(self, key, value):
cacheFullPath = os.path.join(self.cache, self.safe(key))
f = file(cacheFullPath, "wb")
f.write(value)
f.close()
def delete(self, key):
cacheFullPath = os.path.join(self.cache, self.safe(key))
if os.path.exists(cacheFullPath):
os.remove(cacheFullPath)
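# A minimal usage sketch (hypothetical directory and key):
#   cache = FileCache(".cache")
#   cache.set("http://example.org/", "status: 200\r\n\r\nbody")
#   cache.get("http://example.org/")     # -> "status: 200\r\n\r\nbody"
#   cache.delete("http://example.org/")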
class Credentials(object):
def __init__(self):
self.credentials = []
def add(self, name, password, domain=""):
self.credentials.append((domain.lower(), name, password))
def clear(self):
self.credentials = []
def iter(self, domain):
for (cdomain, name, password) in self.credentials:
if cdomain == "" or domain == cdomain:
yield (name, password)
class KeyCerts(Credentials):
"""Identical to Credentials except that
name/password are mapped to key/cert."""
pass
class ProxyInfo(object):
"""Collect information required to use a proxy."""
def __init__(self, proxy_type, proxy_host, proxy_port, proxy_rdns=None, proxy_user=None, proxy_pass=None):
"""The parameter proxy_type must be set to one of socks.PROXY_TYPE_XXX
constants. For example:
p = ProxyInfo(proxy_type=socks.PROXY_TYPE_HTTP, proxy_host='localhost', proxy_port=8000)
"""
self.proxy_type, self.proxy_host, self.proxy_port, self.proxy_rdns, self.proxy_user, self.proxy_pass = proxy_type, proxy_host, proxy_port, proxy_rdns, proxy_user, proxy_pass
def astuple(self):
return (self.proxy_type, self.proxy_host, self.proxy_port, self.proxy_rdns,
self.proxy_user, self.proxy_pass)
def isgood(self):
return socks and (self.proxy_host != None) and (self.proxy_port != None)
class HTTPConnectionWithTimeout(httplib.HTTPConnection):
"""HTTPConnection subclass that supports timeouts"""
def __init__(self, host, port=None, strict=None, timeout=None, proxy_info=None):
httplib.HTTPConnection.__init__(self, host, port, strict)
self.timeout = timeout
self.proxy_info = proxy_info
def connect(self):
"""Connect to the host and port specified in __init__."""
# Mostly verbatim from httplib.py.
msg = "getaddrinfo returns an empty list"
for res in socket.getaddrinfo(self.host, self.port, 0,
socket.SOCK_STREAM):
af, socktype, proto, canonname, sa = res
try:
if self.proxy_info and self.proxy_info.isgood():
self.sock = socks.socksocket(af, socktype, proto)
self.sock.setproxy(*self.proxy_info.astuple())
else:
self.sock = socket.socket(af, socktype, proto)
# Different from httplib: support timeouts.
if has_timeout(self.timeout):
self.sock.settimeout(self.timeout)
# End of difference from httplib.
if self.debuglevel > 0:
print "connect: (%s, %s)" % (self.host, self.port)
self.sock.connect(sa)
except socket.error, msg:
if self.debuglevel > 0:
print 'connect fail:', (self.host, self.port)
if self.sock:
self.sock.close()
self.sock = None
continue
break
if not self.sock:
raise socket.error, msg
class HTTPSConnectionWithTimeout(httplib.HTTPSConnection):
"This class allows communication via SSL."
def __init__(self, host, port=None, key_file=None, cert_file=None,
strict=None, timeout=None, proxy_info=None):
httplib.HTTPSConnection.__init__(self, host, port=port, key_file=key_file,
cert_file=cert_file, strict=strict)
self.timeout = timeout
self.proxy_info = proxy_info
def connect(self):
"Connect to a host on a given (SSL) port."
if self.proxy_info and self.proxy_info.isgood():
sock = socks.socksocket(socket.AF_INET, socket.SOCK_STREAM)
sock.setproxy(*self.proxy_info.astuple())
else:
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
if has_timeout(self.timeout):
sock.settimeout(self.timeout)
sock.connect((self.host, self.port))
self.sock =_ssl_wrap_socket(sock, self.key_file, self.cert_file)
class Http(object):
"""An HTTP client that handles:
- all methods
- caching
- ETags
- compression,
- HTTPS
- Basic
- Digest
- WSSE
and more.
"""
def __init__(self, cache=None, timeout=None, proxy_info=None):
"""The value of proxy_info is a ProxyInfo instance.
If 'cache' is a string then it is used as a directory name
for a disk cache. Otherwise it must be an object that supports
the same interface as FileCache."""
self.proxy_info = proxy_info
# Map domain name to an httplib connection
self.connections = {}
# The location of the cache, for now a directory
# where cached responses are held.
if cache and isinstance(cache, str):
self.cache = FileCache(cache)
else:
self.cache = cache
# Name/password
self.credentials = Credentials()
# Key/cert
self.certificates = KeyCerts()
# authorization objects
self.authorizations = []
# If set to False then no redirects are followed, even safe ones.
self.follow_redirects = True
# Which HTTP methods do we apply optimistic concurrency to, i.e.
# which methods get an "if-match:" etag header added to them.
self.optimistic_concurrency_methods = ["PUT"]
# If 'follow_redirects' is True, and this is set to True then
        # all redirects are followed, including unsafe ones.
self.follow_all_redirects = False
self.ignore_etag = False
self.force_exception_to_status_code = False
self.timeout = timeout
def _auth_from_challenge(self, host, request_uri, headers, response, content):
"""A generator that creates Authorization objects
that can be applied to requests.
"""
challenges = _parse_www_authenticate(response, 'www-authenticate')
for cred in self.credentials.iter(host):
for scheme in AUTH_SCHEME_ORDER:
if challenges.has_key(scheme):
yield AUTH_SCHEME_CLASSES[scheme](cred, host, request_uri, headers, response, content, self)
def add_credentials(self, name, password, domain=""):
"""Add a name and password that will be used
any time a request requires authentication."""
self.credentials.add(name, password, domain)
def add_certificate(self, key, cert, domain):
"""Add a key and cert that will be used
any time a request requires authentication."""
self.certificates.add(key, cert, domain)
def clear_credentials(self):
"""Remove all the names and passwords
that are used for authentication"""
self.credentials.clear()
self.authorizations = []
def _conn_request(self, conn, request_uri, method, body, headers):
for i in range(2):
try:
conn.request(method, request_uri, body, headers)
except socket.gaierror:
conn.close()
raise ServerNotFoundError("Unable to find the server at %s" % conn.host)
except (socket.error, httplib.HTTPException):
# Just because the server closed the connection doesn't apparently mean
# that the server didn't send a response.
pass
try:
response = conn.getresponse()
except (socket.error, httplib.HTTPException):
if i == 0:
conn.close()
conn.connect()
continue
else:
raise
else:
content = ""
if method == "HEAD":
response.close()
else:
content = response.read()
response = Response(response)
if method != "HEAD":
content = _decompressContent(response, content)
break
return (response, content)
def _request(self, conn, host, absolute_uri, request_uri, method, body, headers, redirections, cachekey):
"""Do the actual request using the connection object
and also follow one level of redirects if necessary"""
auths = [(auth.depth(request_uri), auth) for auth in self.authorizations if auth.inscope(host, request_uri)]
auth = auths and sorted(auths)[0][1] or None
if auth:
auth.request(method, request_uri, headers, body)
(response, content) = self._conn_request(conn, request_uri, method, body, headers)
if auth:
if auth.response(response, body):
auth.request(method, request_uri, headers, body)
(response, content) = self._conn_request(conn, request_uri, method, body, headers )
response._stale_digest = 1
if response.status == 401:
for authorization in self._auth_from_challenge(host, request_uri, headers, response, content):
authorization.request(method, request_uri, headers, body)
(response, content) = self._conn_request(conn, request_uri, method, body, headers, )
if response.status != 401:
self.authorizations.append(authorization)
authorization.response(response, body)
break
if (self.follow_all_redirects or (method in ["GET", "HEAD"]) or response.status == 303):
if self.follow_redirects and response.status in [300, 301, 302, 303, 307]:
# Pick out the location header and basically start from the beginning
# remembering first to strip the ETag header and decrement our 'depth'
if redirections:
if not response.has_key('location') and response.status != 300:
raise RedirectMissingLocation( _("Redirected but the response is missing a Location: header."), response, content)
# Fix-up relative redirects (which violate an RFC 2616 MUST)
if response.has_key('location'):
location = response['location']
(scheme, authority, path, query, fragment) = parse_uri(location)
if authority == None:
response['location'] = urlparse.urljoin(absolute_uri, location)
if response.status == 301 and method in ["GET", "HEAD"]:
response['-x-permanent-redirect-url'] = response['location']
if not response.has_key('content-location'):
response['content-location'] = absolute_uri
_updateCache(headers, response, content, self.cache, cachekey)
if headers.has_key('if-none-match'):
del headers['if-none-match']
if headers.has_key('if-modified-since'):
del headers['if-modified-since']
if response.has_key('location'):
location = response['location']
old_response = copy.deepcopy(response)
if not old_response.has_key('content-location'):
old_response['content-location'] = absolute_uri
redirect_method = ((response.status == 303) and (method not in ["GET", "HEAD"])) and "GET" or method
(response, content) = self.request(location, redirect_method, body=body, headers = headers, redirections = redirections - 1)
response.previous = old_response
else:
raise RedirectLimit( _("Redirected more times than rediection_limit allows."), response, content)
elif response.status in [200, 203] and method == "GET":
# Don't cache 206's since we aren't going to handle byte range requests
if not response.has_key('content-location'):
response['content-location'] = absolute_uri
_updateCache(headers, response, content, self.cache, cachekey)
return (response, content)
def _normalize_headers(self, headers):
return _normalize_headers(headers)
# Need to catch and rebrand some exceptions
# Then need to optionally turn all exceptions into status codes
# including all socket.* and httplib.* exceptions.
def request(self, uri, method="GET", body=None, headers=None, redirections=DEFAULT_MAX_REDIRECTS, connection_type=None):
""" Performs a single HTTP request.
The 'uri' is the URI of the HTTP resource and can begin
with either 'http' or 'https'. The value of 'uri' must be an absolute URI.
The 'method' is the HTTP method to perform, such as GET, POST, DELETE, etc.
There is no restriction on the methods allowed.
The 'body' is the entity body to be sent with the request. It is a string
object.
Any extra headers that are to be sent with the request should be provided in the
'headers' dictionary.
        The maximum number of redirects to follow before raising an
        exception is 'redirections'. The default is 5.
The return value is a tuple of (response, content), the first
        being an instance of the 'Response' class, the second being
a string that contains the response entity body.
"""
try:
if headers is None:
headers = {}
else:
headers = self._normalize_headers(headers)
if not headers.has_key('user-agent'):
headers['user-agent'] = "Python-httplib2/%s" % __version__
uri = iri2uri(uri)
(scheme, authority, request_uri, defrag_uri) = urlnorm(uri)
domain_port = authority.split(":")[0:2]
if len(domain_port) == 2 and domain_port[1] == '443' and scheme == 'http':
scheme = 'https'
authority = domain_port[0]
conn_key = scheme+":"+authority
if conn_key in self.connections:
conn = self.connections[conn_key]
else:
if not connection_type:
connection_type = (scheme == 'https') and HTTPSConnectionWithTimeout or HTTPConnectionWithTimeout
certs = list(self.certificates.iter(authority))
if scheme == 'https' and certs:
conn = self.connections[conn_key] = connection_type(authority, key_file=certs[0][0],
cert_file=certs[0][1], timeout=self.timeout, proxy_info=self.proxy_info)
else:
conn = self.connections[conn_key] = connection_type(authority, timeout=self.timeout, proxy_info=self.proxy_info)
conn.set_debuglevel(debuglevel)
if method in ["GET", "HEAD"] and 'range' not in headers and 'accept-encoding' not in headers:
headers['accept-encoding'] = 'gzip, deflate'
info = email.Message.Message()
cached_value = None
if self.cache:
cachekey = defrag_uri
cached_value = self.cache.get(cachekey)
if cached_value:
# info = email.message_from_string(cached_value)
#
# Need to replace the line above with the kludge below
# to fix the non-existent bug not fixed in this
# bug report: http://mail.python.org/pipermail/python-bugs-list/2005-September/030289.html
try:
info, content = cached_value.split('\r\n\r\n', 1)
feedparser = email.FeedParser.FeedParser()
feedparser.feed(info)
info = feedparser.close()
feedparser._parse = None
except IndexError:
self.cache.delete(cachekey)
cachekey = None
cached_value = None
else:
cachekey = None
if method in self.optimistic_concurrency_methods and self.cache and info.has_key('etag') and not self.ignore_etag and 'if-match' not in headers:
# http://www.w3.org/1999/04/Editing/
headers['if-match'] = info['etag']
if method not in ["GET", "HEAD"] and self.cache and cachekey:
# RFC 2616 Section 13.10
self.cache.delete(cachekey)
# Check the vary header in the cache to see if this request
# matches what varies in the cache.
if method in ['GET', 'HEAD'] and 'vary' in info:
vary = info['vary']
vary_headers = vary.lower().replace(' ', '').split(',')
for header in vary_headers:
key = '-varied-%s' % header
value = info[key]
if headers.get(header, '') != value:
cached_value = None
break
if cached_value and method in ["GET", "HEAD"] and self.cache and 'range' not in headers:
if info.has_key('-x-permanent-redirect-url'):
# Should cached permanent redirects be counted in our redirection count? For now, yes.
(response, new_content) = self.request(info['-x-permanent-redirect-url'], "GET", headers = headers, redirections = redirections - 1)
response.previous = Response(info)
response.previous.fromcache = True
else:
# Determine our course of action:
# Is the cached entry fresh or stale?
# Has the client requested a non-cached response?
#
                    # There seem to be three possible answers:
# 1. [FRESH] Return the cache entry w/o doing a GET
# 2. [STALE] Do the GET (but add in cache validators if available)
# 3. [TRANSPARENT] Do a GET w/o any cache validators (Cache-Control: no-cache) on the request
entry_disposition = _entry_disposition(info, headers)
if entry_disposition == "FRESH":
if not cached_value:
info['status'] = '504'
content = ""
response = Response(info)
if cached_value:
response.fromcache = True
return (response, content)
if entry_disposition == "STALE":
if info.has_key('etag') and not self.ignore_etag and not 'if-none-match' in headers:
headers['if-none-match'] = info['etag']
if info.has_key('last-modified') and not 'last-modified' in headers:
headers['if-modified-since'] = info['last-modified']
elif entry_disposition == "TRANSPARENT":
pass
(response, new_content) = self._request(conn, authority, uri, request_uri, method, body, headers, redirections, cachekey)
if response.status == 304 and method == "GET":
# Rewrite the cache entry with the new end-to-end headers
# Take all headers that are in response
# and overwrite their values in info.
# unless they are hop-by-hop, or are listed in the connection header.
for key in _get_end2end_headers(response):
info[key] = response[key]
merged_response = Response(info)
if hasattr(response, "_stale_digest"):
merged_response._stale_digest = response._stale_digest
_updateCache(headers, merged_response, content, self.cache, cachekey)
response = merged_response
response.status = 200
response.fromcache = True
elif response.status == 200:
content = new_content
else:
self.cache.delete(cachekey)
content = new_content
else:
cc = _parse_cache_control(headers)
if cc.has_key('only-if-cached'):
info['status'] = '504'
response = Response(info)
content = ""
else:
(response, content) = self._request(conn, authority, uri, request_uri, method, body, headers, redirections, cachekey)
except Exception, e:
if self.force_exception_to_status_code:
if isinstance(e, HttpLib2ErrorWithResponse):
response = e.response
content = e.content
response.status = 500
response.reason = str(e)
elif isinstance(e, socket.timeout):
content = "Request Timeout"
response = Response( {
"content-type": "text/plain",
"status": "408",
"content-length": len(content)
})
response.reason = "Request Timeout"
else:
content = str(e)
response = Response( {
"content-type": "text/plain",
"status": "400",
"content-length": len(content)
})
response.reason = "Bad Request"
else:
raise
return (response, content)
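# Illustrative usage sketch, not part of the original module (assuming the
# enclosing class is httplib2's Http; the cache directory and URL below are
# hypothetical):
#
#     h = Http('.cache')
#     (response, content) = h.request('http://example.org/', 'GET')
#     if response.status == 200 and response.fromcache:
#         print 'served from the local cache'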
class Response(dict):
"""An object more like email.Message than httplib.HTTPResponse."""
"""Is this response from our local cache"""
fromcache = False
"""HTTP protocol version used by server. 10 for HTTP/1.0, 11 for HTTP/1.1. """
version = 11
"Status code returned by server. "
status = 200
"""Reason phrase returned by server."""
reason = "Ok"
previous = None
def __init__(self, info):
# info is either an email.Message or
# an httplib.HTTPResponse object.
if isinstance(info, httplib.HTTPResponse):
for key, value in info.getheaders():
self[key.lower()] = value
self.status = info.status
self['status'] = str(self.status)
self.reason = info.reason
self.version = info.version
elif isinstance(info, email.Message.Message):
for key, value in info.items():
self[key] = value
self.status = int(self['status'])
else:
for key, value in info.iteritems():
self[key] = value
self.status = int(self.get('status', self.status))
def __getattr__(self, name):
if name == 'dict':
return self
else:
raise AttributeError, name
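# Minimal sketch of Response normalizing a plain dict (the header values are
# made up):
#
#     r = Response({'status': '301', 'location': 'http://example.org/'})
#     assert r.status == 301                        # parsed to an int
#     assert r['location'] == 'http://example.org/' # headers stay mapped
#     assert r.dict is r                            # via __getattr__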
| apache-2.0 |
1flow/1flow | oneflow/core/forms/chaineditem.py | 2 | 2164 | # -*- coding: utf-8 -*-
u"""
Copyright 2013-2014 Olivier Cortès <[email protected]>.
This file is part of the 1flow project.
1flow is free software: you can redistribute it and/or modify
it under the terms of the GNU Affero General Public License as
published by the Free Software Foundation, either version 3 of
the License, or (at your option) any later version.
1flow is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Affero General Public License for more details.
You should have received a copy of the GNU Affero General Public
License along with 1flow. If not, see http://www.gnu.org/licenses/
"""
# import random
import logging
# from constance import config
import autocomplete_light
from django.conf import settings
from django import forms
# from django.utils.translation import ugettext as _
from codemirror import CodeMirrorTextarea
from ..models import ChainedItem
LOGGER = logging.getLogger(__name__)
class ChainedItemForm(autocomplete_light.ModelForm):
""" A simple chained item model form. """
class Meta:
model = ChainedItem
exclude = ('chain', 'item', 'position',
'is_active', 'is_valid', 'check_error', 'notes', )
# fields = ('item', 'is_active',
# 'parameters', )
class ChainedItemPositionForm(forms.ModelForm):
""" A chained item model form to update its position in list. """
class Meta:
model = ChainedItem
fields = ('position', )
class ChainedItemEditParametersForm(forms.ModelForm):
""" Edit a chained item model parameters. """
    # Caught in the edit_field modal; avoids ESC/click-outside.
prevent_accidental_close = True
class Meta:
model = ChainedItem
fields = ('parameters', )
widgets = {
'parameters': CodeMirrorTextarea(
mode='yaml',
addon_js=settings.CODEMIRROR_ADDONS_JS,
addon_css=settings.CODEMIRROR_ADDONS_CSS,
keymap=settings.CODEMIRROR_KEYMAP,
)
}
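# Hedged usage sketch (standard Django form handling; `item` stands for a
# hypothetical ChainedItem instance fetched elsewhere):
#
#     form = ChainedItemPositionForm(data={'position': 2}, instance=item)
#     if form.is_valid():
#         form.save()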
| agpl-3.0 |
jtoppins/beaker | IntegrationTests/src/bkr/inttest/client/test_job_list.py | 1 | 7304 |
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
from turbogears.database import session
from bkr.inttest import data_setup, with_transaction
from bkr.inttest.client import run_client, create_client_config, ClientError, \
ClientTestCase
import json
from bkr.server.model import TaskStatus
class JobListTest(ClientTestCase):
@with_transaction
def setUp(self):
        jobs_to_generate = 2
self.products = [data_setup.create_product() for product in range(jobs_to_generate)]
self.users = [data_setup.create_user(password='mypass') for user in range(jobs_to_generate)]
self.jobs = [data_setup.create_completed_job(product=self.products[x], owner=self.users[x]) for x in range(jobs_to_generate)]
self.client_configs = [create_client_config(username=user.user_name, password='mypass') for user in self.users]
def test_list_jobs_by_product(self):
out = run_client(['bkr', 'job-list', '--product', self.products[0].name])
self.assert_(self.jobs[0].t_id in out, [self.jobs[0].t_id, out])
self.assertRaises(ClientError, run_client, ['bkr', 'job-list', '--product', 'foobar'])
def test_list_jobs_by_owner(self):
out = run_client(['bkr', 'job-list', '--owner', self.users[0].user_name])
self.assert_(self.jobs[0].t_id in out, out)
out = run_client(['bkr', 'job-list', '--owner', self.users[0].user_name, '--limit', '1'])
        self.assert_(len(json.loads(out)) == 1, out)
out = run_client(['bkr', 'job-list', '--owner', 'foobar'])
self.assert_(self.jobs[0].t_id not in out, out)
out = run_client(['bkr', 'job-list', '--owner', self.users[0].user_name, '--min-id', \
'{0}'.format(self.jobs[0].id), '--max-id', '{0}'.format(self.jobs[0].id)])
self.assert_(self.jobs[0].t_id in out and self.jobs[1].t_id not in out)
def test_list_jobs_by_whiteboard(self):
out = run_client(['bkr', 'job-list', '--whiteboard', self.jobs[0].whiteboard])
self.assert_(self.jobs[0].t_id in out, out)
out = run_client(['bkr', 'job-list', '--whiteboard', 'foobar'])
self.assert_(self.jobs[0].t_id not in out, out)
# https://bugzilla.redhat.com/show_bug.cgi?id=1277340
def test_list_jobs_by_whiteboard_substring(self):
with session.begin():
included_job = data_setup.create_completed_job(whiteboard=u'Prince of Penzance')
excluded_job = data_setup.create_completed_job(whiteboard=u'Princess of Persia')
out = run_client(['bkr', 'job-list', '--format=list', '--whiteboard=penzance'])
listed_job_ids = out.splitlines()
self.assertIn(included_job.t_id, listed_job_ids)
self.assertNotIn(excluded_job.t_id, listed_job_ids)
# This was accidental undocumented functionality supported by the
# original implementation of jobs.filter. Some people are probably
# relying on it.
out = run_client(['bkr', 'job-list', '--format=list', '--whiteboard=p%z_nce'])
listed_job_ids = out.splitlines()
self.assertIn(included_job.t_id, listed_job_ids)
self.assertNotIn(excluded_job.t_id, listed_job_ids)
# https://bugzilla.redhat.com/show_bug.cgi?id=1229938
def test_list_jobs_by_retention_tag(self):
with session.begin():
job_tagged_scratch = data_setup.create_completed_job(
retention_tag=u'scratch')
job_tagged_audit = data_setup.create_completed_job(
retention_tag=u'audit', product=data_setup.create_product())
out = run_client(['bkr', 'job-list', '--format=json', '--tag=audit'])
joblist = json.loads(out)
self.assertIn(job_tagged_audit.t_id, joblist)
self.assertNotIn(job_tagged_scratch.t_id, joblist)
out = run_client(['bkr', 'job-list', '--format=json', '--tag=scratch'])
joblist = json.loads(out)
self.assertIn(job_tagged_scratch.t_id, joblist)
self.assertNotIn(job_tagged_audit.t_id, joblist)
#https://bugzilla.redhat.com/show_bug.cgi?id=816490
def test_list_jobs_by_jid(self):
out = run_client(['bkr', 'job-list', '--min-id', '{0}'.format(self.jobs[1].id)])
self.assert_(self.jobs[1].t_id in out and self.jobs[0].t_id not in out)
out = run_client(['bkr', 'job-list', '--max-id', '{0}'.format(self.jobs[0].id)])
self.assert_(self.jobs[0].t_id in out and self.jobs[1].t_id not in out)
#https://bugzilla.redhat.com/show_bug.cgi?id=907650
def test_list_jobs_mine(self):
out = run_client(['bkr', 'job-list', '--mine'], config=self.client_configs[0])
self.assert_(self.jobs[0].t_id in out and self.jobs[1].t_id not in out, out)
out = run_client(['bkr', 'job-list', '--mine',
'--format','json'],
config=self.client_configs[0])
out = json.loads(out)
self.assertIn(self.jobs[0].t_id, out)
self.assertNotIn(self.jobs[1].t_id, out)
self.assertRaises(ClientError, run_client, ['bkr', 'job-list', '--mine', \
'--username', 'xyz',\
'--password','xyz'])
    def test_cannot_specify_finished_and_unfinished_at_the_same_time(self):
try:
run_client(['bkr', 'job-list', '--finished', '--unfinished'])
self.fail('should raise')
except ClientError, e:
self.assertEqual(e.status, 2)
self.assertIn("Only one of --finished or --unfinished may be specified", e.stderr_output)
def test_filter_finished_jobs(self):
with session.begin():
completed_job = data_setup.create_completed_job(task_status=TaskStatus.completed)
cancelled_job = data_setup.create_completed_job(task_status=TaskStatus.cancelled)
aborted_job = data_setup.create_completed_job(task_status=TaskStatus.aborted)
running_job = data_setup.create_running_job()
out = run_client(['bkr', 'job-list', '--finished'])
self.assertIn(completed_job.t_id, out)
self.assertIn(cancelled_job.t_id, out)
self.assertIn(aborted_job.t_id, out)
self.assertNotIn(running_job.t_id, out)
# https://bugzilla.redhat.com/show_bug.cgi?id=1175853
def test_filter_unfinished_jobs(self):
with session.begin():
queued_job = data_setup.create_queued_job()
running_job = data_setup.create_running_job()
waiting_job = data_setup.create_waiting_job()
scheduled_job = data_setup.create_scheduled_job()
installing_job = data_setup.create_installing_job()
completed_job = data_setup.create_completed_job()
out = run_client(['bkr', 'job-list', '--unfinished'])
self.assertIn(queued_job.t_id, out)
self.assertIn(running_job.t_id, out)
self.assertIn(waiting_job.t_id, out)
self.assertIn(scheduled_job.t_id, out)
self.assertIn(installing_job.t_id, out)
self.assertNotIn(completed_job.t_id, out) | gpl-2.0 |
jchevin/MissionPlanner-master | Lib/distutils/command/config.py | 75 | 13130 | """distutils.command.config
Implements the Distutils 'config' command, a (mostly) empty command class
that exists mainly to be sub-classed by specific module distributions and
applications. The idea is that while every "config" command is different,
at least they're all named the same, and users always see "config" in the
list of standard commands. Also, this is a good place to put common
configure-like tasks: "try to compile this C code", or "figure out where
this header file lives".
"""
__revision__ = "$Id$"
import os
import re
from distutils.core import Command
from distutils.errors import DistutilsExecError
from distutils.ccompiler import customize_compiler
from distutils import log
LANG_EXT = {'c': '.c', 'c++': '.cxx'}
class config(Command):
description = "prepare to build"
user_options = [
('compiler=', None,
"specify the compiler type"),
('cc=', None,
"specify the compiler executable"),
('include-dirs=', 'I',
"list of directories to search for header files"),
('define=', 'D',
"C preprocessor macros to define"),
('undef=', 'U',
"C preprocessor macros to undefine"),
('libraries=', 'l',
"external C libraries to link with"),
('library-dirs=', 'L',
"directories to search for external C libraries"),
('noisy', None,
"show every action (compile, link, run, ...) taken"),
('dump-source', None,
"dump generated source files before attempting to compile them"),
]
# The three standard command methods: since the "config" command
# does nothing by default, these are empty.
def initialize_options(self):
self.compiler = None
self.cc = None
self.include_dirs = None
self.libraries = None
self.library_dirs = None
# maximal output for now
self.noisy = 1
self.dump_source = 1
# list of temporary files generated along-the-way that we have
# to clean at some point
self.temp_files = []
def finalize_options(self):
if self.include_dirs is None:
self.include_dirs = self.distribution.include_dirs or []
elif isinstance(self.include_dirs, str):
self.include_dirs = self.include_dirs.split(os.pathsep)
if self.libraries is None:
self.libraries = []
elif isinstance(self.libraries, str):
self.libraries = [self.libraries]
if self.library_dirs is None:
self.library_dirs = []
elif isinstance(self.library_dirs, str):
self.library_dirs = self.library_dirs.split(os.pathsep)
def run(self):
pass
# Utility methods for actual "config" commands. The interfaces are
# loosely based on Autoconf macros of similar names. Sub-classes
# may use these freely.
def _check_compiler(self):
"""Check that 'self.compiler' really is a CCompiler object;
if not, make it one.
"""
# We do this late, and only on-demand, because this is an expensive
# import.
from distutils.ccompiler import CCompiler, new_compiler
if not isinstance(self.compiler, CCompiler):
self.compiler = new_compiler(compiler=self.compiler,
dry_run=self.dry_run, force=1)
customize_compiler(self.compiler)
if self.include_dirs:
self.compiler.set_include_dirs(self.include_dirs)
if self.libraries:
self.compiler.set_libraries(self.libraries)
if self.library_dirs:
self.compiler.set_library_dirs(self.library_dirs)
def _gen_temp_sourcefile(self, body, headers, lang):
filename = "_configtest" + LANG_EXT[lang]
file = open(filename, "w")
if headers:
for header in headers:
file.write("#include <%s>\n" % header)
file.write("\n")
file.write(body)
if body[-1] != "\n":
file.write("\n")
file.close()
return filename
def _preprocess(self, body, headers, include_dirs, lang):
src = self._gen_temp_sourcefile(body, headers, lang)
out = "_configtest.i"
self.temp_files.extend([src, out])
self.compiler.preprocess(src, out, include_dirs=include_dirs)
return (src, out)
def _compile(self, body, headers, include_dirs, lang):
src = self._gen_temp_sourcefile(body, headers, lang)
if self.dump_source:
dump_file(src, "compiling '%s':" % src)
(obj,) = self.compiler.object_filenames([src])
self.temp_files.extend([src, obj])
self.compiler.compile([src], include_dirs=include_dirs)
return (src, obj)
def _link(self, body, headers, include_dirs, libraries, library_dirs,
lang):
(src, obj) = self._compile(body, headers, include_dirs, lang)
prog = os.path.splitext(os.path.basename(src))[0]
self.compiler.link_executable([obj], prog,
libraries=libraries,
library_dirs=library_dirs,
target_lang=lang)
if self.compiler.exe_extension is not None:
prog = prog + self.compiler.exe_extension
self.temp_files.append(prog)
return (src, obj, prog)
def _clean(self, *filenames):
if not filenames:
filenames = self.temp_files
self.temp_files = []
log.info("removing: %s", ' '.join(filenames))
for filename in filenames:
try:
os.remove(filename)
except OSError:
pass
# XXX these ignore the dry-run flag: what to do, what to do? even if
# you want a dry-run build, you still need some sort of configuration
# info. My inclination is to make it up to the real config command to
# consult 'dry_run', and assume a default (minimal) configuration if
# true. The problem with trying to do it here is that you'd have to
# return either true or false from all the 'try' methods, neither of
# which is correct.
# XXX need access to the header search path and maybe default macros.
def try_cpp(self, body=None, headers=None, include_dirs=None, lang="c"):
"""Construct a source file from 'body' (a string containing lines
of C/C++ code) and 'headers' (a list of header files to include)
and run it through the preprocessor. Return true if the
preprocessor succeeded, false if there were any errors.
('body' probably isn't of much use, but what the heck.)
"""
from distutils.ccompiler import CompileError
self._check_compiler()
ok = 1
try:
self._preprocess(body, headers, include_dirs, lang)
except CompileError:
ok = 0
self._clean()
return ok
def search_cpp(self, pattern, body=None, headers=None, include_dirs=None,
lang="c"):
"""Construct a source file (just like 'try_cpp()'), run it through
the preprocessor, and return true if any line of the output matches
'pattern'. 'pattern' should either be a compiled regex object or a
string containing a regex. If both 'body' and 'headers' are None,
preprocesses an empty file -- which can be useful to determine the
symbols the preprocessor and compiler set by default.
"""
self._check_compiler()
src, out = self._preprocess(body, headers, include_dirs, lang)
if isinstance(pattern, str):
pattern = re.compile(pattern)
file = open(out)
match = 0
while 1:
line = file.readline()
if line == '':
break
if pattern.search(line):
match = 1
break
file.close()
self._clean()
return match
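    # Hedged example: probe a predefined macro by scanning preprocessor
    # output (the macro and marker token are illustrative):
    #
    #     body = "#ifdef __unix__\nfound_unix_marker\n#endif\n"
    #     if self.search_cpp(r'found_unix_marker', body=body):
    #         log.info("looks like a Unix platform")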
def try_compile(self, body, headers=None, include_dirs=None, lang="c"):
"""Try to compile a source file built from 'body' and 'headers'.
Return true on success, false otherwise.
"""
from distutils.ccompiler import CompileError
self._check_compiler()
try:
self._compile(body, headers, include_dirs, lang)
ok = 1
except CompileError:
ok = 0
log.info(ok and "success!" or "failure.")
self._clean()
return ok
def try_link(self, body, headers=None, include_dirs=None, libraries=None,
library_dirs=None, lang="c"):
"""Try to compile and link a source file, built from 'body' and
'headers', to executable form. Return true on success, false
otherwise.
"""
from distutils.ccompiler import CompileError, LinkError
self._check_compiler()
try:
self._link(body, headers, include_dirs,
libraries, library_dirs, lang)
ok = 1
except (CompileError, LinkError):
ok = 0
log.info(ok and "success!" or "failure.")
self._clean()
return ok
def try_run(self, body, headers=None, include_dirs=None, libraries=None,
library_dirs=None, lang="c"):
"""Try to compile, link to an executable, and run a program
built from 'body' and 'headers'. Return true on success, false
otherwise.
"""
from distutils.ccompiler import CompileError, LinkError
self._check_compiler()
try:
src, obj, exe = self._link(body, headers, include_dirs,
libraries, library_dirs, lang)
self.spawn([exe])
ok = 1
except (CompileError, LinkError, DistutilsExecError):
ok = 0
log.info(ok and "success!" or "failure.")
self._clean()
return ok
# -- High-level methods --------------------------------------------
# (these are the ones that are actually likely to be useful
# when implementing a real-world config command!)
def check_func(self, func, headers=None, include_dirs=None,
libraries=None, library_dirs=None, decl=0, call=0):
"""Determine if function 'func' is available by constructing a
source file that refers to 'func', and compiles and links it.
If everything succeeds, returns true; otherwise returns false.
The constructed source file starts out by including the header
files listed in 'headers'. If 'decl' is true, it then declares
'func' (as "int func()"); you probably shouldn't supply 'headers'
        and set 'decl' true in the same call, or you might get errors about
        conflicting declarations of 'func'. Finally, the constructed
'main()' function either references 'func' or (if 'call' is true)
calls it. 'libraries' and 'library_dirs' are used when
linking.
"""
self._check_compiler()
body = []
if decl:
body.append("int %s ();" % func)
body.append("int main () {")
if call:
body.append(" %s();" % func)
else:
body.append(" %s;" % func)
body.append("}")
body = "\n".join(body) + "\n"
return self.try_link(body, headers, include_dirs,
libraries, library_dirs)
# check_func ()
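    # Illustrative sketch of the probe source generated above for
    # check_func('pow', decl=1, call=1); try_link() then decides
    # availability by whether this compiles and links:
    #
    #     int pow ();
    #     int main () {
    #       pow();
    #     }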
def check_lib(self, library, library_dirs=None, headers=None,
include_dirs=None, other_libraries=[]):
"""Determine if 'library' is available to be linked against,
without actually checking that any particular symbols are provided
by it. 'headers' will be used in constructing the source file to
be compiled, but the only effect of this is to check if all the
header files listed are available. Any libraries listed in
'other_libraries' will be included in the link, in case 'library'
has symbols that depend on other libraries.
"""
self._check_compiler()
return self.try_link("int main (void) { }",
headers, include_dirs,
[library]+other_libraries, library_dirs)
def check_header(self, header, include_dirs=None, library_dirs=None,
lang="c"):
"""Determine if the system header file named by 'header_file'
exists and can be found by the preprocessor; return true if so,
false otherwise.
"""
return self.try_cpp(body="/* No body */", headers=[header],
include_dirs=include_dirs)
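# A minimal sketch of a distribution-specific subclass using the probes
# above (the header and function names are only examples):
#
#     class my_config(config):
#         def run(self):
#             if self.check_header('stdint.h') and self.check_func('snprintf'):
#                 log.info("required platform features found")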
def dump_file(filename, head=None):
"""Dumps a file content into log.info.
If head is not None, will be dumped before the file content.
"""
if head is None:
log.info('%s' % filename)
else:
log.info(head)
file = open(filename)
try:
log.info(file.read())
finally:
file.close()
| gpl-3.0 |
SNoiraud/gramps | gramps/gen/filters/rules/event/_hassourcecount.py | 5 | 1756 | #
# Gramps - a GTK+/GNOME based genealogy program
#
# Copyright (C) 2002-2007 Donald N. Allingham
# Copyright (C) 2007-2009 Brian G. Matherly
# Copyright (C) 2009 Benny Malengier
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
#-------------------------------------------------------------------------
#
# Standard Python modules
#
#-------------------------------------------------------------------------
from ....const import GRAMPS_LOCALE as glocale
_ = glocale.translation.gettext
#-------------------------------------------------------------------------
#
# Gramps modules
#
#-------------------------------------------------------------------------
from .._hassourcecountbase import HasSourceCountBase
#-------------------------------------------------------------------------
# "People having sources"
#-------------------------------------------------------------------------
class HasSourceCount(HasSourceCountBase):
"""Events with sources"""
name = _('Events with <count> sources')
    description = _("Matches events with a certain number of sources connected to them")
| gpl-2.0 |
heran7/edx-platform | common/lib/xmodule/xmodule/abtest_module.py | 9 | 5266 | import random
import logging
from lxml import etree
from xmodule.x_module import XModule
from xmodule.raw_module import RawDescriptor
from xmodule.xml_module import XmlDescriptor
from xmodule.exceptions import InvalidDefinitionError
from xblock.fields import String, Scope, Dict
DEFAULT = "_DEFAULT_GROUP"
log = logging.getLogger(__name__)
def group_from_value(groups, v):
    """
    Given groups: (('a', 0.3), ('b', 0.4), ('c', 0.3)) and a random value v
    in [0, 1], return the associated group (in the above case, return
    'a' if v < 0.3, 'b' if 0.3 <= v < 0.7, and 'c' if v >= 0.7)
    """
    total = 0
    for (g, p) in groups:
        total = total + p
        if total > v:
            return g
    # Round-off errors might cause us to run off the end of the list.
    # If they do, return the last element.
    return g
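# Worked example of the weighted pick above (values chosen for
# illustration): the running sum first exceeds v at the winning group.
#
#     groups = (('a', 0.3), ('b', 0.4), ('c', 0.3))
#     assert group_from_value(groups, 0.0) == 'a'
#     assert group_from_value(groups, 0.5) == 'b'   # 0.3 + 0.4 > 0.5
#     assert group_from_value(groups, 0.9) == 'c'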
class ABTestFields(object):
group_portions = Dict(help="What proportions of students should go in each group", default={DEFAULT: 1}, scope=Scope.content)
group_assignments = Dict(help="What group this user belongs to", scope=Scope.preferences, default={})
group_content = Dict(help="What content to display to each group", scope=Scope.content, default={DEFAULT: []})
experiment = String(help="Experiment that this A/B test belongs to", scope=Scope.content)
has_children = True
class ABTestModule(ABTestFields, XModule):
"""
    Implements an A/B test with an arbitrary number of competing groups
"""
def __init__(self, *args, **kwargs):
XModule.__init__(self, *args, **kwargs)
if self.group is None:
self.group = group_from_value(
self.group_portions.items(),
random.uniform(0, 1)
)
@property
def group(self):
return self.group_assignments.get(self.experiment)
@group.setter
def group(self, value):
self.group_assignments[self.experiment] = value
@group.deleter
def group(self):
del self.group_assignments[self.experiment]
def get_child_descriptors(self):
active_locations = set(self.group_content[self.group])
return [desc for desc in self.descriptor.get_children() if desc.location.url() in active_locations]
def displayable_items(self):
# Most modules return "self" as the displayable_item. We never display ourself
# (which is why we don't implement get_html). We only display our children.
return self.get_children()
# TODO (cpennington): User groups should be a first class object, rather than being
# managed by ABTests
class ABTestDescriptor(ABTestFields, RawDescriptor, XmlDescriptor):
module_class = ABTestModule
@classmethod
def definition_from_xml(cls, xml_object, system):
"""
XML Format:
<abtest experiment="experiment_name">
<group name="a" portion=".1"><contenta/></group>
<group name="b" portion=".2"><contentb/></group>
<default><contentdefault/></default>
</abtest>
"""
experiment = xml_object.get('experiment')
if experiment is None:
raise InvalidDefinitionError(
"ABTests must specify an experiment. Not found in:\n{xml}"
.format(xml=etree.tostring(xml_object, pretty_print=True)))
group_portions = {}
group_content = {}
children = []
for group in xml_object:
if group.tag == 'default':
name = DEFAULT
else:
name = group.get('name')
group_portions[name] = float(group.get('portion', 0))
child_content_urls = []
for child in group:
try:
child_content_urls.append(system.process_xml(etree.tostring(child)).location.url())
except:
log.exception("Unable to load child when parsing ABTest. Continuing...")
continue
group_content[name] = child_content_urls
children.extend(child_content_urls)
default_portion = 1 - sum(
portion for (name, portion) in group_portions.items()
)
if default_portion < 0:
raise InvalidDefinitionError("ABTest portions must add up to less than or equal to 1")
group_portions[DEFAULT] = default_portion
children.sort()
return {
'group_portions': group_portions,
'group_content': group_content,
}, children
def definition_to_xml(self, resource_fs):
xml_object = etree.Element('abtest')
xml_object.set('experiment', self.experiment)
for name, group in self.group_content.items():
if name == DEFAULT:
group_elem = etree.SubElement(xml_object, 'default')
else:
group_elem = etree.SubElement(xml_object, 'group', attrib={
'portion': str(self.group_portions[name]),
'name': name,
})
for child_loc in group:
child = self.system.load_item(child_loc)
group_elem.append(etree.fromstring(child.export_to_xml(resource_fs)))
return xml_object
def has_dynamic_children(self):
return True
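# For the XML shown in definition_from_xml's docstring, the parsed
# definition would carry portions like the following (child URLs elided,
# since they come from system.process_xml):
#
#     group_portions == {'a': 0.1, 'b': 0.2, DEFAULT: 0.7}
#
# The default group absorbs whatever portion the named groups leave over.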
| agpl-3.0 |
theCatWisel/ThreatExchange | pytx/pytx/batch.py | 6 | 4375 | import json
from .access_token import get_access_token
from .request import Broker
from .vocabulary import Batch as b
from .vocabulary import ThreatExchange as t
from .errors import (
pytxFetchError
)
class Batch(object):
"""
Class for making Batch requests to the API.
"""
def __init__(self, **kwargs):
"""
        Initialize the object.
"""
@classmethod
def get_relative(cls, url):
"""
Parse the full URL to get the relative URL.
"""
return url.replace(t.URL, '')
@classmethod
def prepare_single_request(cls, request, name=None):
"""
Prepare a single request to be included in batch.
:param request: A dictionary in the format required by Batch.submit().
:type request: dict
:param name: A name to give this request.
:type name: str
:returns: dict
"""
d = {b.METHOD: request.get('type',
request.get('method', 'GET')),
b.RELATIVE_URL: Batch.get_relative(request.get('url',
request.get('relative_url', '')))}
body = request.get('body', None)
if body:
d[b.BODY] = body
if name:
d['name'] = name
return d
@classmethod
def submit(cls,
*args,
**kwargs):
"""
Submit batch request. All non-named args are considered to be
dictionaries containing the following:
type: The request type (GET, POST, etc.).
url: The full or relative URL for the API call.
body: If the type is POST this is the body that will be used.
If you use "method" instead of "type" and/or "relative_urL" instead of
"url" (which is accurate to the Graph API) we will use them
appropriately.
If you pass a named argument, we will consider the name as the name you
wish to include in that specific request. This is useful for referencing
a request in another request in the Batch (see FB documentation).
The following named args are considered to be the options below.
:param include_headers: Include headers in response.
:type include_headers: bool
:param omit_response: Omit response on success.
:type omit_response: bool
:param retries: Number of retries before stopping.
:type retries: int
:param headers: header info for requests.
:type headers: dict
:param proxies: proxy info for requests.
:type proxies: dict
:param verify: verify info for requests.
:type verify: bool, str
:returns: dict (using json.loads())
"""
batch = []
retries = kwargs.get('retries', None)
if retries:
del kwargs['retries']
headers = kwargs.get('headers', None)
if headers:
del kwargs['headers']
proxies = kwargs.get('proxies', None)
if proxies:
del kwargs['proxies']
verify = kwargs.get('verify', None)
if verify:
del kwargs['verify']
include_headers = kwargs.get('include_headers', None)
if include_headers:
del kwargs['include_headers']
include_headers = Broker.sanitize_bool(include_headers)
omit_response = kwargs.get('omit_response', None)
if omit_response:
del kwargs['omit_response']
omit_response = Broker.sanitize_bool(omit_response)
for arg in args:
batch.append(Batch.prepare_single_request(arg))
for key, value in kwargs.iteritems():
batch.append(Batch.prepare_single_request(value, name=key))
params = {t.ACCESS_TOKEN: get_access_token(),
t.BATCH: json.dumps(batch),
t.INCLUDE_HEADERS: include_headers,
t.OMIT_RESPONSE_ON_SUCCESS: omit_response}
try:
return Broker.post(t.URL,
params=params,
retries=retries,
headers=headers,
proxies=proxies,
verify=verify)
except:
raise pytxFetchError('Error with batch request.')
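# Illustrative sketch (the relative URLs are hypothetical) of a two-request
# batch; positional dicts become unnamed requests, keyword arguments become
# named ones via prepare_single_request(..., name=key):
#
#     results = Batch.submit(
#         {'type': 'GET', 'url': t.URL + 'malware_analyses'},
#         descriptors={'method': 'GET', 'relative_url': 'threat_descriptors'},
#         retries=3,
#     )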
| bsd-3-clause |
bpedman/py-notify-async | notify/base.py | 2 | 42093 | # -*- coding: utf-8 -*-
#--------------------------------------------------------------------#
# This file is part of Py-notify. #
# #
# Copyright (C) 2006, 2007, 2008 Paul Pogonyshev. #
# #
# This library is free software; you can redistribute it and/or #
# modify it under the terms of the GNU Lesser General Public License #
# as published by the Free Software Foundation; either version 2.1 #
# of the License, or (at your option) any later version. #
# #
# This library is distributed in the hope that it will be useful, #
# but WITHOUT ANY WARRANTY; without even the implied warranty of #
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU #
# Lesser General Public License for more details. #
# #
# You should have received a copy of the GNU Lesser General Public #
# License along with this library; if not, write to the Free #
# Software Foundation, Inc., 51 Franklin Street, Fifth Floor, #
# Boston, MA 02110-1301 USA #
#--------------------------------------------------------------------#
"""
Mostly internal module that contains functionality common to L{conditions <condition>} and
L{variables <variable>}. You can use C{L{AbstractValueObject}} class directly, if really
needed, but almost always conditions or variables is what you need.
"""
__docformat__ = 'epytext en'
__all__ = ('AbstractValueObject',)
import sys
from notify.mediator import AbstractMediator
from notify.signal import AbstractSignal, Signal
from notify.utils import execute, is_callable, is_valid_identifier, mangle_identifier, \
raise_not_implemented_exception, StringType
try:
import contextlib
except ImportError:
# Ignore, related features will not be provided.
pass
#-- Base class for conditions and variables --------------------------
class AbstractValueObject (object):
"""
Base class for C{L{AbstractCondition <condition.AbstractCondition>}} and
C{L{AbstractVariable <variable.AbstractVariable>}} implementing common functionality.
@group Basic:
get, set, mutable, changed
@group Storing Using Handlers:
store, store_safe, storing, storing_safely
@group Synchronizing Two Objects:
synchronize, synchronize_safe, desynchronize, desynchronize_fully, synchronizing,
synchronizing_safely
@group Freezing Value Changes:
is_frozen, changes_frozen, with_changes_frozen
@group Methods for Subclasses:
_is_mutable, _value_changed, _create_signal, _has_signal, _remove_signal,
_additional_description
@group Internals:
__get_changed_signal, __to_string, __flags, __signal
@sort:
get, set, mutable, changed,
store, store_safe, storing, storing_safely,
synchronize, synchronize_safe, desynchronize, desynchronize_fully, synchronizing,
synchronizing_safely,
is_frozen, changes_frozen, with_changes_frozen,
_is_mutable, _value_changed, _create_signal, _has_signal, _remove_signal,
_additional_description
"""
__slots__ = ('__weakref__', '__signal', '__flags')
# Implementation note: `__flags' are a sum of following values:
# * 0 if there is no signal, 1 if `__signal' is an `AbstractSignal' instance, 2 if it
# is a reference to one;
# * 0 if the object is not frozen, -4 if it is.
#
# The reason for the first term is to save one (slow) isinstance() call per `changed'
# emission. The second term is actually needed anyway, it is only `weird', since we
# need to combine with the first.
#
# We rely on Python's caching of small integers, otherwise this does waste memory.
def __init__(self):
"""
Initialize new C{L{AbstractValueObject}}. Base class only has (internal) field
for ‘changed’ signal. You may assume that the signal is only created when
C{L{changed}} property is read for the first time.
"""
super (AbstractValueObject, self).__init__()
# For optimization reasons, `__signal' is created only when it is needed for the
# first time. This may improve memory consumption if there are many unused
# properties.
self.__signal = None
self.__flags = 0
def get (self):
"""
Get the current value of the object. Note that the default implementation just
raises C{NotImplementedError}, since the current value is not stored by default.
@rtype: C{object}
"""
raise_not_implemented_exception (self)
def set (self, value):
"""
Set the current value to C{value}, if possible. Default implementation always
raises C{NotImplementedError} as it is not mutable.
@param value: new value for C{AbstractValueObject}.
@rtype: C{bool}
@returns: Whether setting value had any effect, i.e. C{True}
if C{value} is not equal to result of C{L{get}}
method.
@raises NotImplementedError: if the object is not mutable.
@raises ValueError: if C{value} is not suitable for some reason.
"""
raise_not_implemented_exception (self)
def _is_mutable (self):
"""
Determine if object is mutable and thus if C{L{set}} method can be called at all.
Default implementation assumes that if derived class overrides C{set} method, its
instances are mutable. This method should be overriden if that’s not the case.
This method may be used from outside, but you should consider using C{L{mutable}}
property instead, as it is public and more convenient.
@rtype: C{bool}
"""
return self.set.im_func is not AbstractValueObject.set.im_func
if sys.version_info[0] >= 3:
def temp (self):
return self.set.__func__ is not AbstractValueObject.set
temp.__doc__ = _is_mutable.__doc__
_is_mutable = temp
del temp
mutable = property (lambda self: self._is_mutable (),
doc = ("""
Read-only property indicating if this object is mutable.
In other words, if object’s value can be changed by
C{L{set}} method, or if it is computed by some means and
not settable from outside.
@type: bool
"""))
def __get_changed_signal (self):
flags = self.__flags
if flags & 3:
if flags & 1:
return self.__signal
else:
return self.__signal ()
signal, self.__signal = self._create_signal ()
if self.__signal is signal:
self.__flags += 1
else:
self.__flags += 2
return signal
def _create_signal (self):
"""
Create the signal that will be returned by C{L{changed}} property. Default
implementation returns an instance of C{L{Signal <signal.Signal>}} class without
accumulator, but derived classes may wish to override this.
Note that this method will be called only when getting C{changed} property and
only if there is no signal yet. I.e. only for the first reading at all or first
reading after a call to C{L{_remove_signal}}.
Return value of this method is a little bit tricky. It must return a tuple of two
objects, first being an instance of C{L{AbstractSignal}}. The second object must
be either the same signal object I{or} a reference to it, i.e. object with
C{__call__} method returning the signal. In the second case, you will most likely
want to return a weak reference, but there is no restriction.
@rtype: C{L{AbstractSignal}}, C{object}
@returns: See method description for details.
"""
signal = Signal ()
return signal, signal
def _has_signal (self):
"""
Determine if there is currently a ‘changed’ signal. This is C{True} only if
C{L{_create_signal}} has been called and the call to it was not followed by
C{L{_remove_signal}}.
This method I{can} be called from outside, but should normally be left to
subclasses of C{AbstractValueObject}.
@rtype: C{bool}
"""
return self.__signal is not None
def _remove_signal (self, signal):
"""
Remove current ‘changed’ signal if it is the same object as specified by C{signal}
argument. If signal is removed, C{True} is returned. Signal must only be removed
if it has no handlers, to save memory, but it is allowed to call this method in
other cases when its argument is guaranteed to be different from the ‘changed’
signal.
This function I{must not} be called from outside. It is only for descendant
classes’ use.
@param signal: the signal object or a reference to signal to remove (the same as
C{L{_create_signal}} returned.)
@rtype: C{bool}
@returns: Whether ‘changed’ signal is removed.
"""
if self.__signal is signal:
self.__signal = None
self.__flags &= ~3
return True
else:
return False
def store (self, handler, *arguments, **keywords):
"""
Make sure current value is ‘transmitted’ to C{handler} (with C{arguments}). This
means that the C{handler} is called once with the C{arguments} and the current
value and afterwards each time the current value changes. The only argument
passed to C{handler} in addition to specified ones is the value as returned by the
C{L{get}} method.
See C{L{AbstractSignal.connect}} method description for details how C{arguments}
are handled.
@param handler: the object to store value with.
@type handler: callable
@param arguments: optional arguments prepended to the current value when invoking
C{handler}.
@raises TypeError: if C{handler} is not callable or cannot be called with
C{arguments} and current object value.
"""
handler (*(arguments + (self.get (),)), **keywords)
self.__get_changed_signal ().connect (handler, *arguments, **keywords)
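    # Hedged usage sketch (assuming notify.variable.Variable): store()
    # invokes the handler once with the current value, then again on
    # every change.
    #
    #     variable = Variable (1)
    #     variable.store (lambda value: sys.stdout.write ('%s\n' % value))
    #     # '1' is printed immediately; the assignment below prints '2':
    #     variable.value = 2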
def store_safe (self, handler, *arguments, **keywords):
"""
Like C{L{store}}, except that if C{handler} is already connected to this object’s
‘changed’ signal, this method does nothing. See C{L{Signal.connect_safe}} method
for details.
@param handler: the object to store value with.
@type handler: callable
@param arguments: optional arguments prepended to the current value when invoking
C{handler}.
@rtype: C{bool}
@returns: Whether C{handler} is connected to ‘changed’ signal.
@raises TypeError: if C{handler} is not callable or cannot be called with
C{arguments} and current object value.
"""
if not self.__get_changed_signal ().is_connected (handler, *arguments):
handler (*(arguments + (self.get (),)), **keywords)
self.__get_changed_signal ().connect (handler, *arguments, **keywords)
return True
else:
return False
def synchronize (self, value_object, mediator = None):
"""
Synchronize own value with that of C{value_object}. Both objects must be mutable.
Value determined by C{value_object.get()} is first passed to C{self.set}. After
that, each object’s C{L{set}} method is connected to other object ‘changed’
signal. This guarantees that objects’ values remain the same if no exception
occurs. (Except that C{mediator}, if not C{None}, or either objects’ C{set}
method can modify passed value.)
If C{mediator} is not C{None}, values copied from C{value_object} to C{self} are
passed through its ‘forward’ transformation, the way round—through ‘back’
transformation. See L{mediators description <mediator>} for details.
@param value_object: other value object to synchronize with.
@type value_object: C{AbstractValueObject}
@param mediator: optional mediator to transform values between C{self} and
C{value_object}.
@type mediator: C{L{AbstractMediator}} or C{None}
@raises TypeError: if C{value_object} is not an C{AbstractValueObject} or
C{mediator} is neither C{None} nor an instance of
C{L{AbstractMediator <mediator.AbstractMediator>}}.
@raises ValueError: if C{self} and C{value_object} are the same object.
@raises ValueError: if either C{self} or C{value_object} is not mutable.
@raises ValueError: if current value of C{value_object} is not suitable for
C{self}.
"""
if not isinstance (value_object, AbstractValueObject):
raise TypeError ("can only synchronize with other 'AbstractValueObject' instances")
        if value_object is self:
            raise ValueError ("cannot synchronize an object with itself")
if not self._is_mutable ():
raise ValueError ("'%s' is not mutable" % self)
if not value_object._is_mutable ():
raise ValueError ("'%s' is not mutable" % value_object)
if mediator is None:
# Note: order is important!
value_object.store (self.set)
self.__get_changed_signal ().connect (value_object.set)
else:
if not isinstance (mediator, AbstractMediator):
raise TypeError ('second argument must be a mediator')
# Note: order is important!
value_object.store (mediator.forward (self.set))
self.__get_changed_signal ().connect (mediator.back (value_object.set))
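    # Illustrative sketch (again assuming notify.variable.Variable):
    # synchronize() first copies the other object's value, then keeps
    # both in lock-step.
    #
    #     a = Variable (1)
    #     b = Variable (2)
    #     a.synchronize (b)     # a takes b's value first
    #     assert a.value == 2
    #     a.value = 3
    #     assert b.value == 3   # changes now propagate both ways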
def synchronize_safe (self, value_object, mediator = None):
"""
Like C{L{synchronize}} except that uses C{L{store_safe}} instead of L{store}. See
C{L{synchronize}} for details.
@param value_object: other value object to synchronize with.
@type value_object: C{AbstractValueObject}
@param mediator: optional mediator to transform values between C{self} and
C{value_object}.
@type mediator: C{L{AbstractMediator}} or C{None}
@raises TypeError: if C{value_object} is not an C{AbstractValueObject} or
C{mediator} is neither C{None} nor an instance of
C{L{AbstractMediator <mediator.AbstractMediator>}}.
@raises ValueError: if C{self} and C{value_object} are the same object.
@raises ValueError: if either C{self} or C{value_object} is not mutable.
@raises ValueError: if current value of C{value_object} is not suitable for
C{self}.
"""
if not isinstance (value_object, AbstractValueObject):
raise TypeError ("can only synchronize with other 'AbstractValueObject' instances")
        if value_object is self:
            raise ValueError ("cannot synchronize an object with itself")
if not value_object._is_mutable ():
raise ValueError ("target 'AbstractValueObject' instance is not mutable")
if mediator is None:
# Note: order is important!
value_object.store_safe (self.set)
self.__get_changed_signal ().connect_safe (value_object.set)
else:
if not isinstance (mediator, AbstractMediator):
raise TypeError ('second argument must be a mediator')
# Note: order is important!
value_object.store_safe (mediator.forward (self.set))
self.__get_changed_signal ().connect_safe (mediator.back (value_object.set))
def desynchronize (self, value_object, mediator = None):
"""
Desynchronize own value with that of C{value_object}. Note that values do not
start to differ immediately, actually, this method doesn’t change values at all.
Rather, values will not be synchronized anymore if any of the two changes. If
C{mediator} is not C{None}, this method cancels effect of a call to
C{L{synchronize}} with the same or equal mediator only.
Note that C{desynchronize} must be called the same number of times as
C{synchronize} in order to cancel future synchronization. If you need to do that
in one call and regardless of how many times the latter has been called, use
C{L{desynchronize_fully}} method instead.
@param value_object: other value object to desynchronize with.
@type value_object: C{AbstractValueObject}
@param mediator: mediator, equal to what was passed to corresponding call
to C{L{synchronize}}.
@type mediator: C{L{AbstractMediator}} or C{None}
@rtype: C{bool}
@returns: Whether C{self} and C{value_object} have been synchronized
before (using C{mediator} if it is not C{None}.)
@raises TypeError: if C{mediator} is neither C{None} nor an instance of
C{L{AbstractMediator <mediator.AbstractMediator>}}.
@note:
If C{L{set}} method of one of the values has been connected to other value’s
‘changed’ signal, but otherwise is not true, this method does nothing and returns
C{False}. So, it should be safe unless C{L{synchronize}} has been called or its
effect has been emulated manually.
"""
if mediator is not None and not isinstance (mediator, AbstractMediator):
raise TypeError ('second argument must be a mediator')
if ( not isinstance (value_object, AbstractValueObject)
or not value_object._is_mutable ()
or not self._has_signal ()
or not value_object._has_signal ()):
return False
if mediator is None:
forward = self.set
back = value_object.set
else:
forward = mediator.forward (self.set)
back = mediator.back (value_object.set)
if ( value_object.__get_changed_signal ().is_connected (forward)
and self .__get_changed_signal ().is_connected (back)):
value_object.__get_changed_signal ().disconnect (forward)
self. __get_changed_signal ().disconnect (back)
return True
else:
return False
def desynchronize_fully (self, value_object, mediator = None):
"""
Desynchronize own value with that of C{value_object}. Note that values do not
start to differ immediately, actually, this method doesn’t change values at all.
Rather, values will not be synchronized anymore if any of the two changes. If
C{mediator} is not C{None}, this method cancels effect of a call to
C{L{synchronize}} with the same or equal mediator only.
Note that C{desynchronize_fully} cancels future synchronization regardless of how
many times C{synchronize} has been called. If that is not what you want, use
C{L{desynchronize}} method instead.
@param value_object: other value object to desynchronize with.
@type value_object: C{AbstractValueObject}
@param mediator: mediator, equal to what was passed to corresponding call
to C{L{synchronize}}.
@type mediator: C{L{AbstractMediator}} or C{None}
@rtype: C{bool}
@returns: Whether C{self} and C{value_object} have been synchronized
before (using C{mediator} if it is not C{None}.)
@raises TypeError: if C{mediator} is neither C{None} nor an instance of
C{L{AbstractMediator <mediator.AbstractMediator>}}.
@note:
If C{L{set}} method of one of the values has been connected to other value’s
‘changed’ signal, but otherwise is not true, this method does nothing and returns
C{False}. So, it should be safe unless C{L{synchronize}} has been called or its
effect has been emulated manually.
Also remember that calling this function is not always the same as calling
C{L{desynchronize}} until it starts to return C{False}. These calls give
different results if values’ C{set} methods are connected to other value’s
‘changed’ signal different number of times. So, C{desynchronize_fully} may be
dangerous at times.
"""
if mediator is not None and not isinstance (mediator, AbstractMediator):
raise TypeError ('second argument must be a mediator')
if ( not isinstance (value_object, AbstractValueObject)
or not value_object._is_mutable ()
or not self._has_signal ()
or not value_object._has_signal ()):
return False
if mediator is None:
forward = self.set
back = value_object.set
else:
forward = mediator.forward (self.set)
back = mediator.back (value_object.set)
if ( value_object.__get_changed_signal ().is_connected (forward)
and self .__get_changed_signal ().is_connected (back)):
value_object.__get_changed_signal ().disconnect_all (forward)
self. __get_changed_signal ().disconnect_all (back)
return True
else:
return False
if 'contextlib' in globals ():
from notify._2_5 import base as _2_5
storing = _2_5.storing
storing_safely = _2_5.storing_safely
synchronizing = _2_5.synchronizing
synchronizing_safely = _2_5.synchronizing_safely
changes_frozen = _2_5.changes_frozen
# This is needed so that Epydoc sees docstrings as UTF-8 encoded.
storing.__module__ = __module__
storing_safely.__module__ = __module__
synchronizing.__module__ = __module__
synchronizing_safely.__module__ = __module__
changes_frozen.__module__ = __module__
del _2_5
def _value_changed (self, new_value):
"""
Method that must be called every time object’s value changes. Note that this
method I{must not} be called from outside, it is for class descendants only.
To follow general contract of the class, this method must be called only when the
value indeed changes, i.e. when C{new_value} is not equal to C{self.get()}.
C{_value_changed} itself does not check it and so this check (if needed) is up to
implementing descendant class.
For convenience, this method always returns C{True}.
@param new_value: the new value of C{self}; will be also passed to ‘changed’
signal handlers.
@rtype: C{bool}
@returns: Always C{True}.
"""
flags = self.__flags
if flags == 1:
self.__signal.emit (new_value)
elif flags == 2:
self.__signal ().emit (new_value)
return True
def is_frozen (self):
"""
Determine if C{self}’s changes are currently frozen, i.e. if changing C{self}’s
value will not emit ‘changed’ signal. However, if the value is changed from the
time the object is frozen to the time it is “thawed”, ‘changed’ signal will be
emitted, once.
@rtype: C{bool}
@see: C{L{with_changes_frozen}}
@see: C{L{changes_frozen}}
"""
return self.__flags < 0
def with_changes_frozen (self, callback, *arguments, **keywords):
"""
Execute C{callback} with optional C{arguments}, freezing ‘changed’ signal of this
object. In other words, any changes to object’s value until C{callback} returns
don’t cause emission of ‘changed’ signal. However, if object value changes during
C{callback} execution, C{with_changes_frozen} method will emit ‘changed’ signal
once, after C{callback} returns.
Calls to this method can be nested, i.e. C{callback} can call
C{with_changes_frozen} on the same object too. In this case, any nested calls
will I{not} emit ‘changed’ signal, leaving it for the outmost call.
This method is useful in the following cases:
- you expect many changes in object’s value, but want interested parties be
informed about final result only, not about all intermediate states;
- you expect at least two changes and the final result may be “no changes”
compared to original;
- you expect many changes, don’t care about intermediate states and want to
improve performance.
In the second case, if the result is indeed “no changes”, this method ensures that
‘changed’ signal is not emitted at all.
@note:
There exists C{L{changes_frozen}} method with the same semantics. It is only
available on Python 2.5 and later, but allows to use the C{with} language
statement and is preferred, if available. This method is provided only since
Py-notify supports Python versions starting with 2.3.
@rtype: C{object}
@returns: Whatever C{callback} returns, unchanged.
"""
# Note: keep in sync with changes_frozen() in `notify/_2_5/base.py'.
if self.__flags >= 0:
original_value = self.get ()
self.__flags -= 4
try:
return callback (*arguments, **keywords)
finally:
self.__flags += 4
new_value = self.get ()
if new_value != original_value:
self._value_changed (new_value)
else:
return callback (*arguments, **keywords)
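    # Hedged example (Variable assumed as above): both assignments below
    # yield a single 'changed' emission after the callback returns, and
    # none at all if the final value equals the original.
    #
    #     variable = Variable (0)
    #     def bump_twice ():
    #         variable.value = 1
    #         variable.value = 2
    #     variable.with_changes_frozen (bump_twice)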
changed = property (__get_changed_signal,
doc = ("""
The ‘changed’ signal for this object. ‘Changed’ signal is emitted
if and only if the current value is changed and the object is not
L{frozen <is_frozen>}. User of the object must never emit the
signal herself, but may operate with its handlers.
Internally, reading this property creates the signal if it hasn’t
been created yet. Derived classes may assume this behaviour.
@type: C{L{AbstractSignal}}
"""))
def _additional_description (self, formatter):
"""
Generate list of additional descriptions for this object. All description strings
are put in parentheses after basic object description and are separated by
semicolons. Default description mentions number of handlers of ‘changed’ signal,
if there are any at all.
C{formatter} is either C{repr} or C{str} and should be used to format objects
mentioned in list string(s). Its use is not required but encouraged.
Overriden method should look like this:
>>> def _additional_description (self, formatter):
... return (['my-description']
... + super (..., self)._additional_description (formatter))
        You may selectively remove descriptions generated by superclasses, but remember
        that some of them (including this class) may generate a varying number of
        descriptions, so this may not be trivial to do.  In general, there are no
        requirements on the contents of the returned list, except that it must contain
        only strings.
This method is called by standard implementations of C{L{__repr__}} and
C{L{__str__}}. If you use your own (and that is perfectly fine), you don’t need
to override this method.
@param formatter: function (either C{repr} or C{str}) that can be used to format
various objects.
@rtype: C{list}
@returns: List of description strings for this object.
"""
if self._has_signal ():
num_handlers = self.__get_changed_signal ().count_handlers ()
if num_handlers > 1:
return ['%d handlers' % num_handlers]
elif num_handlers == 1:
return ['1 handler']
return []
def __to_string (self, strict):
if strict:
additional_description = self._additional_description (repr)
else:
additional_description = self._additional_description (str)
if additional_description:
return ' (%s)' % '; '.join (additional_description)
else:
return ''
def __repr__(self):
# It is impossible to recreate signal, so don't try to generate a valid Python
# expression.
return '<%s.%s: %s%s>' % (self.__module__, self.__class__.__name__,
repr (self.get ()), self.__to_string (True))
def __str__(self):
return '<%s: %s%s>' % (self.__class__.__name__,
str (self.get ()), self.__to_string (False))
def derive_type (cls, new_class_name, **options):
"""
        Derive and return a new type named C{new_class_name}.  Various C{options}
        define the behaviour of instances of the new type.  The set of accepted
        options, and sometimes their semantics, is defined by the exact class this
        method is called for.
@param new_class_name: name of the new class—important mostly for C{__str__} and
C{__repr__} implementations.
@type new_class_name: C{basestring}
@param options: options for the new type, as listed below.
@newfield option: Option, Options
@option: C{object} — valid Python identifier. If specified, derived
type’s constructor will accept one parameter and store it
inside the created instance. It will be used for calling
C{getter} and C{setter} functions.
@option: C{property} — valid Python identifier. If specified,
C{object}’s value will be readable through a property, but
not writable.
@option: C{dict} — if true, derived type will have a C{__dict__}
slot, allowing setting any attribute on it. This is
                            silently ignored if this class’s objects already have a dict.
@option: C{getter} — a callable accepting one argument, whose return
value will be used as C{L{get}} method result. If
C{object} option is specified, the only argument will be
the one passed to instance constructor, else it will be
C{self} as passed to C{get} method.
        @option:            C{setter} — a callable accepting two arguments, which will
be called from C{L{set}} method. The first argument is
described in C{getter} option; the second is the C{value}
as passed to C{L{set}} method.
@rtype: C{type}
@raises TypeError: if C{new_class_name} is not a string or is not a valid
Python identifier.
@raises exception: whatever C{L{_generate_derived_type_dictionary}} raises, if
anything.
"""
if not is_valid_identifier (new_class_name):
raise TypeError ("'%s' is not a valid Python identifier" % new_class_name)
full_options = dict (options)
full_options['cls'] = cls
full_options['new_class_name'] = new_class_name
dictionary = { '__slots__': () }
for value in cls._generate_derived_type_dictionary (full_options):
if value[0] != '__slots__':
dictionary[value[0]] = value[1]
else:
if isinstance (value[1], StringType):
dictionary['__slots__'] += (value[1],)
else:
dictionary['__slots__'] += tuple (value[1])
metaclass = dictionary.get ('__metaclass__', type (cls))
new_type = metaclass (new_class_name, (cls,), dictionary)
try:
raise Exception
except Exception:
try:
# We try to pretend that the new type is created by the caller module, not
# by `notify.base'. That will give more helpful __repr__ result.
traceback = sys.exc_info () [2]
new_type.__module__ = traceback.tb_frame.f_back.f_globals['__name__']
except RuntimeError:
# We can do nothing, ignore.
pass
return new_type
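    # Illustrative derive_type() sketch (names are hypothetical; assumes some
    # concrete AbstractValueObject subclass `Value'):
    #
    #     StoredValue = Value.derive_type ('StoredValue',
    #                                      object   = '__storage',
    #                                      property = 'storage',
    #                                      getter   = lambda storage: storage.value,
    #                                      setter   = lambda storage, value:
    #                                                     setattr (storage, 'value', value))
    #
    # Instances then accept one constructor argument, which is stored privately and
    # passed to `getter'/`setter' as described in the docstring above.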
def _generate_derived_type_dictionary (cls, options):
"""
Generate an iterable of pairs in the form (Python identifier, value) for a new
type created by C{L{derive_type}}. Exact pairs should be influenced by
C{options}, which are C{options} as passed to C{derive_type} plus C{cls} (for
convenience) and C{new_class_name}.
This method is not meant to be callable from outside, use C{L{derive_type}} for
that instead.
        Overridden implementations of this method are recommended but not required to be
generator functions. They should generally start like this:
>>> def _generate_derived_type_dictionary (cls, options):
... for attribute in super (..., cls)._generate_derived_type_dictionary (options):
... yield attribute
...
... ...
That is only an approximation and you can, for instance, change or override
attributes returned by super-method.
        The C{options} dictionary is constructed in such a way that you should be able
        to evaluate all function-defining statements in it.  For instance, you can
        write your own C{_generate_derived_type_dictionary} like this:
>>> def _generate_derived_type_dictionary (cls, options):
... ...
...
... functions = {}
...
... if 'foo_value' in options:
... exec 'def foo (self): return foo_value' \
... in { 'foo_value': options['foo_value'] }, functions
...
... ...
...
... for function in functions.iteritems ():
... yield function
        The returned value for C{__slots__} is treated specially.  While normally
        values associated with the same name override previous values, values for
        C{__slots__} are combined into a tuple instead.
        Note that it is not recommended to use C{options} as the execution globals or
        locals dictionary directly.  That way your code may break when another option
        is added, e.g. for some derivative of the class.  For instance, if you use the
        C{property} built-in, setting it in C{options} will hide the built-in from
        your code.  Consider using the C{L{_filter_options}} utility method.
@param options: dictionary of options passed to C{L{derive_type}} method, plus
C{cls} and C{new_class_name}.
@type options: C{dict}
@rtype: iterable
@returns: Pairs of (Python identifier, value) for the new type.
@raises exception: if there is any error in C{options}.
"""
functions = {}
filtered_options = AbstractValueObject._filter_options (options, 'cls', 'getter', 'setter')
if 'object' in options:
object = options['object']
if not is_valid_identifier (object):
raise ValueError ("'%s' is not a valid Python identifier" % object)
yield '__slots__', mangle_identifier (options['new_class_name'], object)
execute (('def __init__(self, %s):\n'
' cls.__init__(self)\n'
' %s = %s')
% (object, AbstractValueObject._get_object (options), object),
filtered_options, functions)
if 'property' in options:
property = options['property']
if property == object:
raise ValueError ("'property' option cannot be the same as 'object'")
if not is_valid_identifier (property):
raise ValueError ("'%s' is not a valid Python identifier" % property)
execute ('%s = property (lambda self: %s)'
% (mangle_identifier (options['new_class_name'], property),
AbstractValueObject._get_object (options)),
functions)
else:
if 'property' in options:
raise ValueError ("'property' without 'object' option doesn't make sense")
if 'dict' in options and options['dict']:
# Gracefully ignore if this type already has a dict.
if not _type_has_dictionary (cls):
yield '__slots__', '__dict__'
if 'getter' in options:
if not is_callable (options['getter']):
raise TypeError ("'getter' must be a callable")
execute ('def get (self): return getter (%s)'
% AbstractValueObject._get_object (options),
filtered_options, functions)
if 'setter' in options:
if not is_callable (options['setter']):
raise TypeError ("'setter' must be a callable")
execute ('def set (self, value): return setter (%s, value)'
% AbstractValueObject._get_object (options),
filtered_options, functions)
for function in functions.items ():
yield function
def _get_object (options):
"""
        Return a Python expression for the object that should be passed to various
        user functions.  This is C{"self.some_attribute"} if the C{options} dictionary
        contains an C{"object"} key, else plain C{"self"}.  In the former case, the
        C{"some_attribute"} string evaluates to a private attribute name for the class
        being defined.
This is a helper for C{L{_generate_derived_type_dictionary}} only. It should not
be called from outside.
@param options: the same value as passed to C{_generate_derived_type_dictionary}.
@type options: C{dict}
@rtype: C{str}
"""
if 'object' in options:
return 'self.%s' % mangle_identifier (options['new_class_name'], options['object'])
else:
return 'self'
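    # For example, with options { 'new_class_name': 'Foo', 'object': '__storage' }
    # this returns roughly 'self._Foo__storage' (the exact form depends on
    # mangle_identifier ()); without an 'object' option it returns plain 'self'.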
def _filter_options (options, *names):
"""
        Return a subset of C{options} including only those listed in C{names}.  This is
        a utility method; its main purpose is to build a subset of the options
        dictionary for the C{L{_generate_derived_type_dictionary}} method, which is
        safe to use as locals or globals when evaluating method definitions.
@param options: full dictionary of options.
@type options: C{dict}
@param names: permitted options.
@rtype: C{dict}
"""
filtered_options = {}
for name in names:
if name in options:
filtered_options[name] = options[name]
return filtered_options
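    # E.g. _filter_options ({ 'cls': C, 'getter': g, 'property': p }, 'cls', 'getter')
    # yields { 'cls': C, 'getter': g }; the unlisted 'property' entry is dropped.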
derive_type = classmethod (derive_type)
_generate_derived_type_dictionary = classmethod (_generate_derived_type_dictionary)
_get_object = staticmethod (_get_object)
_filter_options = staticmethod (_filter_options)
# Not breaking out to `utils.py' because general case is far from being perfect.
def _type_has_dictionary (cls):
if hasattr (cls, '__dictoffset__'):
return cls.__dictoffset__ > 0
try:
type (cls) ('_UNUSED_', (cls,), { '__slots__': '__dict__' })
return False
except Exception:
# Of course there might be other problems too, but this is the best I can come up
# with for now.
return True
# Local variables:
# mode: python
# python-indent: 4
# indent-tabs-mode: nil
# fill-column: 90
# End:
| lgpl-2.1 |
jcpowermac/ansible | lib/ansible/modules/cloud/centurylink/clc_firewall_policy.py | 30 | 21505 | #!/usr/bin/python
#
# Copyright (c) 2015 CenturyLink
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
module: clc_firewall_policy
short_description: Create/delete/update firewall policies
description:
  - Create, delete, or update firewall policies on CenturyLink Cloud
version_added: "2.0"
options:
location:
description:
- Target datacenter for the firewall policy
required: True
state:
description:
- Whether to create or delete the firewall policy
default: present
required: False
choices: ['present', 'absent']
source:
description:
- The list of source addresses for traffic on the originating firewall.
        This is required when state is 'present'
default: None
required: False
destination:
description:
- The list of destination addresses for traffic on the terminating firewall.
This is required when state is 'present'
default: None
required: False
ports:
description:
- The list of ports associated with the policy.
TCP and UDP can take in single ports or port ranges.
default: None
required: False
choices: ['any', 'icmp', 'TCP/123', 'UDP/123', 'TCP/123-456', 'UDP/123-456']
firewall_policy_id:
description:
- Id of the firewall policy. This is required to update or delete an existing firewall policy
default: None
required: False
source_account_alias:
description:
- CLC alias for the source account
required: True
destination_account_alias:
description:
- CLC alias for the destination account
default: None
required: False
wait:
description:
- Whether to wait for the provisioning tasks to finish before returning.
default: True
required: False
choices: [True, False]
enabled:
description:
- Whether the firewall policy is enabled or disabled
default: True
required: False
choices: [True, False]
requirements:
- python = 2.7
- requests >= 2.5.0
- clc-sdk
author: "CLC Runner (@clc-runner)"
notes:
    - To use this module, it is required to set the below environment variables which enable access to the
      Centurylink Cloud
          - CLC_V2_API_USERNAME, the account login id for the centurylink cloud
          - CLC_V2_API_PASSWD, the account password for the centurylink cloud
- Alternatively, the module accepts the API token and account alias. The API token can be generated using the
CLC account login and password via the HTTP api call @ https://api.ctl.io/v2/authentication/login
- CLC_V2_API_TOKEN, the API token generated from https://api.ctl.io/v2/authentication/login
- CLC_ACCT_ALIAS, the account alias associated with the centurylink cloud
- Users can set CLC_V2_API_URL to specify an endpoint for pointing to a different CLC environment.
'''
EXAMPLES = '''
---
- name: Create Firewall Policy
hosts: localhost
gather_facts: False
connection: local
tasks:
    - name: Create / Verify a Firewall Policy at CenturyLink Cloud
      clc_firewall_policy:
source_account_alias: WFAD
location: VA1
state: present
source: 10.128.216.0/24
destination: 10.128.216.0/24
ports: Any
destination_account_alias: WFAD
---
- name: Delete Firewall Policy
hosts: localhost
gather_facts: False
connection: local
tasks:
    - name: Delete a Firewall Policy at CenturyLink Cloud
      clc_firewall_policy:
source_account_alias: WFAD
location: VA1
state: absent
firewall_policy_id: c62105233d7a4231bd2e91b9c791e43e1
'''
RETURN = '''
firewall_policy_id:
    description: The firewall policy id
returned: success
type: string
sample: fc36f1bfd47242e488a9c44346438c05
firewall_policy:
    description: The firewall policy information
returned: success
type: dict
sample:
{
"destination":[
"10.1.1.0/24",
"10.2.2.0/24"
],
"destinationAccount":"wfad",
"enabled":true,
"id":"fc36f1bfd47242e488a9c44346438c05",
"links":[
{
"href":"http://api.ctl.io/v2-experimental/firewallPolicies/wfad/uc1/fc36f1bfd47242e488a9c44346438c05",
"rel":"self",
"verbs":[
"GET",
"PUT",
"DELETE"
]
}
],
"ports":[
"any"
],
"source":[
"10.1.1.0/24",
"10.2.2.0/24"
],
"status":"active"
}
'''
__version__ = '${version}'
import os
from ansible.module_utils.six.moves.urllib.parse import urlparse
from time import sleep
from distutils.version import LooseVersion
try:
import requests
except ImportError:
REQUESTS_FOUND = False
else:
REQUESTS_FOUND = True
try:
import clc as clc_sdk
from clc import APIFailedResponse
except ImportError:
CLC_FOUND = False
clc_sdk = None
else:
CLC_FOUND = True
from ansible.module_utils.basic import AnsibleModule
class ClcFirewallPolicy:
clc = None
def __init__(self, module):
"""
Construct module
"""
self.clc = clc_sdk
self.module = module
self.firewall_dict = {}
if not CLC_FOUND:
self.module.fail_json(
msg='clc-python-sdk required for this module')
if not REQUESTS_FOUND:
self.module.fail_json(
msg='requests library is required for this module')
if requests.__version__ and LooseVersion(
requests.__version__) < LooseVersion('2.5.0'):
self.module.fail_json(
msg='requests library version should be >= 2.5.0')
self._set_user_agent(self.clc)
@staticmethod
def _define_module_argument_spec():
"""
Define the argument spec for the ansible module
:return: argument spec dictionary
"""
argument_spec = dict(
location=dict(required=True),
source_account_alias=dict(required=True, default=None),
destination_account_alias=dict(default=None),
firewall_policy_id=dict(default=None),
ports=dict(default=None, type='list'),
source=dict(default=None, type='list'),
destination=dict(default=None, type='list'),
wait=dict(default=True),
state=dict(default='present', choices=['present', 'absent']),
enabled=dict(default=True, choices=[True, False])
)
return argument_spec
def process_request(self):
"""
Execute the main code path, and handle the request
:return: none
"""
changed = False
firewall_policy = None
location = self.module.params.get('location')
source_account_alias = self.module.params.get('source_account_alias')
destination_account_alias = self.module.params.get(
'destination_account_alias')
firewall_policy_id = self.module.params.get('firewall_policy_id')
ports = self.module.params.get('ports')
source = self.module.params.get('source')
destination = self.module.params.get('destination')
wait = self.module.params.get('wait')
state = self.module.params.get('state')
enabled = self.module.params.get('enabled')
self.firewall_dict = {
'location': location,
'source_account_alias': source_account_alias,
'destination_account_alias': destination_account_alias,
'firewall_policy_id': firewall_policy_id,
'ports': ports,
'source': source,
'destination': destination,
'wait': wait,
'state': state,
'enabled': enabled}
self._set_clc_credentials_from_env()
if state == 'absent':
changed, firewall_policy_id, firewall_policy = self._ensure_firewall_policy_is_absent(
source_account_alias, location, self.firewall_dict)
elif state == 'present':
changed, firewall_policy_id, firewall_policy = self._ensure_firewall_policy_is_present(
source_account_alias, location, self.firewall_dict)
return self.module.exit_json(
changed=changed,
firewall_policy_id=firewall_policy_id,
firewall_policy=firewall_policy)
@staticmethod
def _get_policy_id_from_response(response):
"""
Method to parse out the policy id from creation response
:param response: response from firewall creation API call
:return: policy_id: firewall policy id from creation call
"""
url = response.get('links')[0]['href']
path = urlparse(url).path
path_list = os.path.split(path)
policy_id = path_list[-1]
return policy_id
def _set_clc_credentials_from_env(self):
"""
Set the CLC Credentials on the sdk by reading environment variables
:return: none
"""
env = os.environ
v2_api_token = env.get('CLC_V2_API_TOKEN', False)
v2_api_username = env.get('CLC_V2_API_USERNAME', False)
v2_api_passwd = env.get('CLC_V2_API_PASSWD', False)
clc_alias = env.get('CLC_ACCT_ALIAS', False)
api_url = env.get('CLC_V2_API_URL', False)
if api_url:
self.clc.defaults.ENDPOINT_URL_V2 = api_url
if v2_api_token and clc_alias:
self.clc._LOGIN_TOKEN_V2 = v2_api_token
self.clc._V2_ENABLED = True
self.clc.ALIAS = clc_alias
elif v2_api_username and v2_api_passwd:
self.clc.v2.SetCredentials(
api_username=v2_api_username,
api_passwd=v2_api_passwd)
else:
return self.module.fail_json(
msg="You must set the CLC_V2_API_USERNAME and CLC_V2_API_PASSWD "
"environment variables")
def _ensure_firewall_policy_is_present(
self,
source_account_alias,
location,
firewall_dict):
"""
Ensures that a given firewall policy is present
:param source_account_alias: the source account alias for the firewall policy
:param location: datacenter of the firewall policy
:param firewall_dict: dictionary of request parameters for firewall policy
:return: (changed, firewall_policy_id, firewall_policy)
changed: flag for if a change occurred
firewall_policy_id: the firewall policy id that was created/updated
firewall_policy: The firewall_policy object
"""
firewall_policy = None
firewall_policy_id = firewall_dict.get('firewall_policy_id')
if firewall_policy_id is None:
if not self.module.check_mode:
response = self._create_firewall_policy(
source_account_alias,
location,
firewall_dict)
firewall_policy_id = self._get_policy_id_from_response(
response)
changed = True
else:
firewall_policy = self._get_firewall_policy(
source_account_alias, location, firewall_policy_id)
if not firewall_policy:
return self.module.fail_json(
msg='Unable to find the firewall policy id : {0}'.format(
firewall_policy_id))
changed = self._compare_get_request_with_dict(
firewall_policy,
firewall_dict)
if not self.module.check_mode and changed:
self._update_firewall_policy(
source_account_alias,
location,
firewall_policy_id,
firewall_dict)
if changed and firewall_policy_id:
firewall_policy = self._wait_for_requests_to_complete(
source_account_alias,
location,
firewall_policy_id)
return changed, firewall_policy_id, firewall_policy
def _ensure_firewall_policy_is_absent(
self,
source_account_alias,
location,
firewall_dict):
"""
Ensures that a given firewall policy is removed if present
:param source_account_alias: the source account alias for the firewall policy
:param location: datacenter of the firewall policy
:param firewall_dict: firewall policy to delete
:return: (changed, firewall_policy_id, response)
changed: flag for if a change occurred
firewall_policy_id: the firewall policy id that was deleted
response: response from CLC API call
"""
changed = False
response = []
firewall_policy_id = firewall_dict.get('firewall_policy_id')
result = self._get_firewall_policy(
source_account_alias, location, firewall_policy_id)
if result:
if not self.module.check_mode:
response = self._delete_firewall_policy(
source_account_alias,
location,
firewall_policy_id)
changed = True
return changed, firewall_policy_id, response
def _create_firewall_policy(
self,
source_account_alias,
location,
firewall_dict):
"""
Creates the firewall policy for the given account alias
:param source_account_alias: the source account alias for the firewall policy
:param location: datacenter of the firewall policy
:param firewall_dict: dictionary of request parameters for firewall policy
:return: response from CLC API call
"""
payload = {
'destinationAccount': firewall_dict.get('destination_account_alias'),
'source': firewall_dict.get('source'),
'destination': firewall_dict.get('destination'),
'ports': firewall_dict.get('ports')}
try:
response = self.clc.v2.API.Call(
'POST', '/v2-experimental/firewallPolicies/%s/%s' %
(source_account_alias, location), payload)
except APIFailedResponse as e:
return self.module.fail_json(
msg="Unable to create firewall policy. %s" %
str(e.response_text))
return response
def _delete_firewall_policy(
self,
source_account_alias,
location,
firewall_policy_id):
"""
Deletes a given firewall policy for an account alias in a datacenter
:param source_account_alias: the source account alias for the firewall policy
:param location: datacenter of the firewall policy
:param firewall_policy_id: firewall policy id to delete
:return: response: response from CLC API call
"""
try:
response = self.clc.v2.API.Call(
'DELETE', '/v2-experimental/firewallPolicies/%s/%s/%s' %
(source_account_alias, location, firewall_policy_id))
except APIFailedResponse as e:
return self.module.fail_json(
msg="Unable to delete the firewall policy id : {0}. {1}".format(
firewall_policy_id, str(e.response_text)))
return response
def _update_firewall_policy(
self,
source_account_alias,
location,
firewall_policy_id,
firewall_dict):
"""
Updates a firewall policy for a given datacenter and account alias
:param source_account_alias: the source account alias for the firewall policy
:param location: datacenter of the firewall policy
:param firewall_policy_id: firewall policy id to update
:param firewall_dict: dictionary of request parameters for firewall policy
:return: response: response from CLC API call
"""
try:
response = self.clc.v2.API.Call(
'PUT',
'/v2-experimental/firewallPolicies/%s/%s/%s' %
(source_account_alias,
location,
firewall_policy_id),
firewall_dict)
except APIFailedResponse as e:
return self.module.fail_json(
msg="Unable to update the firewall policy id : {0}. {1}".format(
firewall_policy_id, str(e.response_text)))
return response
@staticmethod
def _compare_get_request_with_dict(response, firewall_dict):
"""
Helper method to compare the json response for getting the firewall policy with the request parameters
:param response: response from the get method
:param firewall_dict: dictionary of request parameters for firewall policy
:return: changed: Boolean that returns true if there are differences between
the response parameters and the playbook parameters
"""
changed = False
response_dest_account_alias = response.get('destinationAccount')
response_enabled = response.get('enabled')
response_source = response.get('source')
response_dest = response.get('destination')
response_ports = response.get('ports')
request_dest_account_alias = firewall_dict.get(
'destination_account_alias')
request_enabled = firewall_dict.get('enabled')
if request_enabled is None:
request_enabled = True
request_source = firewall_dict.get('source')
request_dest = firewall_dict.get('destination')
request_ports = firewall_dict.get('ports')
if (
response_dest_account_alias and str(response_dest_account_alias) != str(request_dest_account_alias)) or (
response_enabled != request_enabled) or (
response_source and response_source != request_source) or (
response_dest and response_dest != request_dest) or (
response_ports and response_ports != request_ports):
changed = True
return changed
def _get_firewall_policy(
self,
source_account_alias,
location,
firewall_policy_id):
"""
Get back details for a particular firewall policy
:param source_account_alias: the source account alias for the firewall policy
:param location: datacenter of the firewall policy
:param firewall_policy_id: id of the firewall policy to get
:return: response - The response from CLC API call
"""
response = None
try:
response = self.clc.v2.API.Call(
'GET', '/v2-experimental/firewallPolicies/%s/%s/%s' %
(source_account_alias, location, firewall_policy_id))
except APIFailedResponse as e:
if e.response_status_code != 404:
self.module.fail_json(
msg="Unable to fetch the firewall policy with id : {0}. {1}".format(
firewall_policy_id, str(e.response_text)))
return response
def _wait_for_requests_to_complete(
self,
source_account_alias,
location,
firewall_policy_id,
wait_limit=50):
"""
Waits until the CLC requests are complete if the wait argument is True
:param source_account_alias: The source account alias for the firewall policy
:param location: datacenter of the firewall policy
:param firewall_policy_id: The firewall policy id
:param wait_limit: The number of times to check the status for completion
:return: the firewall_policy object
"""
wait = self.module.params.get('wait')
count = 0
firewall_policy = None
while wait:
count += 1
firewall_policy = self._get_firewall_policy(
source_account_alias, location, firewall_policy_id)
status = firewall_policy.get('status')
if status == 'active' or count > wait_limit:
wait = False
else:
# wait for 2 seconds
sleep(2)
return firewall_policy
@staticmethod
def _set_user_agent(clc):
if hasattr(clc, 'SetRequestsSession'):
agent_string = "ClcAnsibleModule/" + __version__
ses = requests.Session()
ses.headers.update({"Api-Client": agent_string})
ses.headers['User-Agent'] += " " + agent_string
clc.SetRequestsSession(ses)
def main():
"""
The main function. Instantiates the module and calls process_request.
:return: none
"""
module = AnsibleModule(
argument_spec=ClcFirewallPolicy._define_module_argument_spec(),
supports_check_mode=True)
clc_firewall = ClcFirewallPolicy(module)
clc_firewall.process_request()
if __name__ == '__main__':
main()
| gpl-3.0 |
kennethgillen/ansible | lib/ansible/utils/module_docs_fragments/return_common.py | 122 | 1591 | # Copyright (c) 2016 Ansible, Inc
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
class ModuleDocFragment(object):
# Standard documentation fragment
RETURN = '''
changed:
description: Whether the module affected changes on the target.
returned: always
type: bool
sample: False
failed:
description: Whether the module failed to execute.
returned: always
type: bool
sample: True
msg:
description: Human-readable message.
returned: as needed
type: string
sample: "all ok"
skipped:
description: Whether the module was skipped.
returned: always
type: bool
sample: False
results:
    description: List of module results.
    returned: when using a loop
    type: list
    sample: [{changed: True, msg: 'first item changed'}, {changed: False, msg: 'second item ok'}]
exception:
description: Optional information from a handled error.
returned: on some errors
type: string
sample: 'Unknown error'
'''
| gpl-3.0 |
lmazuel/azure-sdk-for-python | azure-mgmt-storage/azure/mgmt/storage/v2016_12_01/models/list_service_sas_response.py | 6 | 1136 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class ListServiceSasResponse(Model):
"""The List service SAS credentials operation response.
Variables are only populated by the server, and will be ignored when
sending a request.
    :ivar service_sas_token: List service SAS credentials of the specific
resource.
:vartype service_sas_token: str
"""
_validation = {
'service_sas_token': {'readonly': True},
}
_attribute_map = {
'service_sas_token': {'key': 'serviceSasToken', 'type': 'str'},
}
def __init__(self):
super(ListServiceSasResponse, self).__init__()
self.service_sas_token = None
| mit |
andreabrambilla/libres | python/tests/res/enkf/export/test_summary_observation_collector.py | 1 | 1748 | import os
from tests import ResTest
from res.test import ErtTestContext
from res.enkf.export import SummaryObservationCollector
class SummaryObservationCollectorTest(ResTest):
def setUp(self):
os.environ["TZ"] = "CET" # The ert_statoil case was generated in CET
self.config = self.createTestPath("local/snake_oil/snake_oil.ert")
def test_summary_observation_collector(self):
with ErtTestContext("python/enkf/export/summary_observation_collector", self.config) as context:
ert = context.getErt()
self.assertTrue(SummaryObservationCollector.summaryKeyHasObservations(ert, "FOPR"))
self.assertFalse(SummaryObservationCollector.summaryKeyHasObservations(ert, "FOPT"))
keys = SummaryObservationCollector.getAllObservationKeys(ert)
self.assertTrue("FOPR" in keys)
self.assertTrue("WOPR:OP1" in keys)
self.assertFalse("WOPR:OP2" in keys)
data = SummaryObservationCollector.loadObservationData(ert, "default_0")
self.assertFloatEqual(data["FOPR"]["2010-01-10"], 0.001696887)
self.assertFloatEqual(data["STD_FOPR"]["2010-01-10"], 0.1)
self.assertFloatEqual(data["WOPR:OP1"]["2010-03-31"], 0.1)
self.assertFloatEqual(data["STD_WOPR:OP1"]["2010-03-31"], 0.05)
with self.assertRaises(KeyError):
fgir = data["FGIR"]
data = SummaryObservationCollector.loadObservationData(ert, "default_0", ["WOPR:OP1"])
self.assertFloatEqual(data["WOPR:OP1"]["2010-03-31"], 0.1)
self.assertFloatEqual(data["STD_WOPR:OP1"]["2010-03-31"], 0.05)
with self.assertRaises(KeyError):
data["FOPR"]
| gpl-3.0 |
aminorex/icsisumm | icsisumm-primary-sys34_v1/nltk/nltk-0.9.2/nltk_contrib/gluesemantics/lfg.py | 9 | 17968 | # Natural Language Toolkit: Lexical Functional Grammar
#
# Author: Dan Garrette <[email protected]>
#
# URL: <http://nltk.sf.net>
# For license information, see LICENSE.TXT
class FStructure(dict):
def read_depgraph(depgraph):
return FStructure._read_depgraph(depgraph.root, depgraph, [0])
read_depgraph = staticmethod(read_depgraph)
def _read_depgraph(node, depgraph, current_label=[0], parent=None):
if node['rel'].lower() in ['spec']:
# the value of a 'spec' entry is a word, not an FStructure
return (node['word'], node['tag'])
else:
self = FStructure()
self.pred = None
self.label = ['f','g','h','i','j','k','l','m','n','o','p','q','r','s','t','u','v','w','x','y','z','a','b','c','d','e'][current_label[0]]
current_label[0] += 1
self.parent = parent
(word, tag) = (node['word'], node['tag'])
if tag[:2] == 'VB':
if tag[2:3] == 'D':
self['tense'] = ('PAST', 'tense')
self.pred = (word, tag[:2])
if not self.pred:
self.pred = (word, tag)
children = [depgraph.nodelist[idx] for idx in node['deps']]
for child in children:
self[child['rel']] = FStructure._read_depgraph(child, depgraph, current_label, self)
return self
_read_depgraph = staticmethod(_read_depgraph)
def read_parsetree(pt, current_label=[0], parent=None):
self = FStructure()
try:
from nltk.tree import Tree
assert isinstance(pt, Tree)
except AssertionError:
print 'Error Tree: \n%s\nis of type %s' % (pt, type(pt))
raise
self.pred = None
self.label = ['f','g','h','i','j','k','l','m','n','o','p','q','r','s','t','u','v','w','x','y','z','a','b','c','d','e'][current_label[0]]
current_label[0] += 1
self.parent = parent
if FStructure.head(pt.node) == 'S':
if FStructure.head(pt[0].node) == 'NP': # S -> NP VP
self['subj'] = FStructure.read_parsetree(pt[0], current_label, self)
if FStructure.head(pt[1][0].node) == 'IV' or FStructure.head(pt[1][0].node) == 'TV' or \
FStructure.head(pt[1][0].node) == 'DTV' or FStructure.head(pt[1][0].node) == 'EquiV' or \
FStructure.head(pt[1][0].node) == 'ObjEquiV' or FStructure.head(pt[1][0].node) == 'TVComp' or \
FStructure.head(pt[1][0].node) == 'RaisingV':
self.pred = (pt[1][0][0], pt[1][0].node['sem']) # the verb
if FStructure.head(pt[1][0].node) == 'TV' or FStructure.head(pt[1][0].node) == 'DTV' or \
FStructure.head(pt[1][0].node) == 'ObjEquiV':
# OBJ for TV, DTV, ObjEquiV
self['obj'] = FStructure.read_parsetree(pt[1][1], current_label, self)
if FStructure.head(pt[1][0].node) == 'DTV':
# THEME for VP -> DTV NP [NP]
self['theme'] = FStructure.read_parsetree(pt[1][2], current_label, self)
elif FStructure.head(pt[1][0].node) == 'ObjEquiV':
# XCOMP for VP -> ObjEquiV NP TO [VP]
self['xcomp'] = FStructure.read_parsetree(pt[1][3], current_label, self)
                            # subj of XCOMP is obj of whole
                            # ie "John persuades David to go" = "John persuades David that David goes"
self['xcomp']['subj'] = self['obj']
elif FStructure.head(pt[1][0].node) == 'TVComp':
# VP -> TVComp [S]
self['comp'] = FStructure.read_parsetree(pt[1][1], current_label, self)
elif FStructure.head(pt[1][0].node) == 'EquiV':
# VP -> EquiV TO [VP]
self['xcomp'] = FStructure.read_parsetree(pt[1][2], current_label, self)
# subj of XCOMP is subj of whole
# ie "John tries to go" = "John tries that John goes"
self['xcomp']['subj'] = self['subj']
elif FStructure.head(pt[1][0].node) == 'RaisingV':
# VP -> RaisingV TO [VP]
self['xcomp'] = FStructure.read_parsetree(pt[1][2], current_label, self)
# subj of XCOMP is subj of whole
# ie "John tries to go" = "John tries that John goes"
self['xcomp']['subj'] = self['subj']
## elif FStructure.head(pt[1][0].node) == 'ADV':
## # VP -> ADV VP
## r = _get_v_and_adjs(pt[1], current_label)
## self.pred = r[0]
## if r[1] != []: self['adj'] = r[1]
elif FStructure.head(pt[1][1].node) == 'CC':
# VP -> VP CC VP
self.pred = (pt[1][1][0], pt[1][1].node['sem']) # the CC
self['conjuncts'] = [FStructure.read_parsetree(pt[1][0], current_label, self)]
self['conjuncts'].append(FStructure.read_parsetree(pt[1][2], current_label, self))
# Both verbs have the same subject
self['conjuncts'][0]['subj'] = FStructure.read_parsetree(pt[0], current_label, self)
self['conjuncts'][1]['subj'] = self['conjuncts'][0]['subj']
elif FStructure.head(pt[1].node) == 'CC': # S -o S CC S
self.pred = (pt[1][0], pt[1].node['sem']) # the CC
self['conjuncts'] = [FStructure.read_parsetree(pt[0], current_label, self)]
self['conjuncts'].append(FStructure.read_parsetree(pt[2], current_label, self))
elif FStructure.head(pt.node) == 'NP':
if FStructure.head(pt[0].node) == 'Det':
# NP -> Det N
r = FStructure._get_n_and_adjs(pt[1], current_label, self)
self.pred = (r[0][0], r[0].node['sem'])
if r[1] != []: self['adj'] = (r[1][0], r[1].node['sem'])
self['spec'] = (pt[0][0][0], pt[0][0].node['sem']) # get the Det
elif FStructure.head(pt[0].node) == 'PropN' or FStructure.head(pt[0].node) == 'Pro':
# NP -> PropN | Pro
self.pred = (pt[0][0], pt[0].node['sem'])
elif FStructure.head(pt[0].node) == 'N':
# NP -> N[num=pl]
r = FStructure._get_n_and_adjs(pt[0], current_label, self)
self.pred = (r[0][0], r[0].node['sem'])
if r[1] != []: self['adj'] = (r[1][0], r[1].node['sem'])
elif FStructure.head(pt[1].node) == 'CC': # NP -o NP CC NP
self.pred = (pt[1][0], pt[1].node['sem']) # the CC
self['conjuncts'] = [FStructure.read_parsetree(pt[0], current_label, self)]
self['conjuncts'].append(FStructure.read_parsetree(pt[2], current_label, self))
elif FStructure.head(pt.node) == 'VP':
if FStructure.head(pt[0].node) == 'IV' or FStructure.head(pt[0].node) == 'TV' or \
FStructure.head(pt[0].node) == 'DTV' or FStructure.head(pt[0].node) == 'EquiV' or \
FStructure.head(pt[0].node) == 'ObjEquiV' or FStructure.head(pt[0].node) == 'RaisingV' or \
FStructure.head(pt[0].node) == 'TVComp':
self.pred = (pt[0][0], pt[0].node['sem']) # the verb
if FStructure.head(pt[0].node) == 'TV' or FStructure.head(pt[0].node) == 'DTV' or \
FStructure.head(pt[0].node) == 'ObjEquiV':
# OBJ for TV, DTV, ObjEquiV
self['obj'] = FStructure.read_parsetree(pt[1], current_label, self)
if FStructure.head(pt[0].node) == 'DTV':
# THEME for VP -o DTV NP [NP]
self['theme'] = FStructure.read_parsetree(pt[2], current_label, self)
elif FStructure.head(pt[0].node) == 'ObjEquiV':
# XCOMP for VP -o ObjEquiV NP TO [VP]
self['xcomp'] = FStructure.read_parsetree(pt[3], current_label, self)
# subj of XCOMP is obj of whole
# ie "John persuades David to go" = "John persaudes David that David goes"
self['xcomp']['subj'] = self['obj']
elif FStructure.head(pt[0].node) == 'TVComp':
# VP -> TVComp [S]
self['comp'] = FStructure.read_parsetree(pt[1], current_label, self)
elif FStructure.head(pt[0].node) == 'EquiV':
# VP -> EquiV TO [VP]
self['xcomp'] = FStructure.read_parsetree(pt[2], current_label, self)
# subj of XCOMP is subj of whole
# ie "John tries to go" = "John tries that John goes"
self['xcomp']['subj'] = self['subj']
elif FStructure.head(pt[0].node) == 'RaisingV':
# VP -> RaisingV TO [VP]
self['xcomp'] = FStructure.read_parsetree(pt[2], current_label, self)
# subj of XCOMP is subj of whole
# ie "John tries to go" = "John tries that John goes"
self['xcomp']['subj'] = self['subj']
## elif FStructure.head(pt[0].node) == 'RB':
## # VP -> RB VP
## self.pred = pt[1] # the verb
## self['adj'] = [FStructure.read_parsetree(pt[0], current_label, self)]
elif FStructure.head(pt[1].node) == 'CC':
# VP -> VP CC VP
self.pred = (pt[1][0], pt[1].node['sem']) # the CC
self['conjuncts'] = [FStructure.read_parsetree(pt[0], current_label, self)]
self['conjuncts'].append(FStructure.read_parsetree(pt[2], current_label, self))
# Both verbs have the same subject
self['conjuncts'][0]['subj'] = FStructure.read_parsetree(pt[0], current_label, self)
self['conjuncts'][1]['subj'] = self['conjuncts'][0]['subj']
elif FStructure.head(pt.node) == 'JJ':
if isinstance(pt[0], str):
## JJ lexical entry
self.pred = (pt[0], pt.node['sem'])
elif FStructure.head(pt.node) == 'ADV':
if isinstance(pt[0], str):
## ADV lexical entry
self.pred = (pt[0], pt.node['sem'])
if self.pred is None:
raise RuntimeError, 'FStructure creation from\n%s\nwas unsuccessful.' % (pt)
return self
read_parsetree = staticmethod(read_parsetree)
def to_glueformula_list(self, glue_pos_dict, labels_added=[], current_subj=None, verbose=False):
from nltk.tree import Tree
glueformulas = []
if not current_subj:
current_subj = self
# lookup 'pred'
sem = self.pred[1]
lookup = glue_pos_dict.lookup(sem, self.pred[0], current_subj, self)
glueformulas.extend(lookup)
for feature in self:
if isinstance(self[feature], FStructure):
if not self[feature].label in labels_added:
glueformulas.extend(self[feature].to_glueformula_list(glue_pos_dict, labels_added,self))
labels_added.append(self[feature].label)
elif isinstance(self[feature], tuple):
glueformulas.extend(glue_pos_dict.lookup(self[feature][1], self[feature][0], None, self))
elif isinstance(self[feature], list):
for entry in self[feature]:
glueformulas.extend(entry.to_glueformula_list(glue_pos_dict, labels_added))
else:
raise Exception, 'feature %s is not an FStruct, a list, or a tuple' % feature
return glueformulas
def initialize_label(self, expression, unique_var_id=[0]):
try:
dot = expression.index('.')
before_dot = expression[:dot]
after_dot = expression[dot+1:]
if before_dot=='super':
return self.parent.initialize_label(after_dot)
else:
try:
return self[before_dot].initialize_label(after_dot)
except KeyError:
raise KeyError, 'FStructure doesn\'t contain a feature %s' % before_dot
except ValueError:
lbl = self.label
if expression=='f': return lbl
elif expression=='v': return '%sv' % lbl
elif expression=='r': return '%sr' % lbl
elif expression=='super': return self.parent.label
elif expression=='var': return '%s%s' % (self.label.upper(), unique_var_id[0])
elif expression=='a': return self['conjuncts'][0].label
elif expression=='b': return self['conjuncts'][1].label
else: return self[expression].label
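    # Illustrative sketch: for an FStructure labeled 'g', initialize_label('f')
    # returns 'g', 'v' returns 'gv', 'r' returns 'gr', and a dotted expression such
    # as 'subj.f' recurses into self['subj'].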
def head(node):
for (fname, fval) in sorted(node.items()):
display = getattr(fname, 'display', None)
if display == 'prefix':
return fval
return None
head = staticmethod(head)
def _get_n_and_adjs(pt, current_label, parent):
""" This function is here to deal with N -o JJ N rules
since we don't know exactly where the noun is.
Returns (noun_word, list_of_adj_fstructs) """
if FStructure.head(pt.node) == 'N':
if isinstance(pt[0], str):
# N lexical entry
return (pt, [])
else: #if FStructure.head(self[0].node) == 'JJ':
# N -o JJ N rule
r = FStructure._get_n_and_adjs(pt[1], current_label, parent)
jj_fstruct = FStructure.read_parsetree(pt[0], current_label, parent)
r[1].append(jj_fstruct) # append the current node's JJ
return (r[0], r[1])
        #if it doesn't match any pattern
        raise RuntimeError, '%s does not match a valid N rule.' % (pt[0])
_get_n_and_adjs = staticmethod(_get_n_and_adjs)
def __repr__(self):
from nltk.tree import Tree
try:
accum = '%s:[' % self.label
except NameError:
accum = '['
try:
accum += 'pred \'%s\'' % self.pred[0]
except NameError:
pass
for feature in self:
if isinstance(self[feature], FStructure):
accum += ' %s %s' % (feature, self[feature].__repr__())
elif isinstance(self[feature], tuple):
accum += ' %s \'%s\'' % (feature, self[feature][0])
elif isinstance(self[feature], list):
accum += ' %s {' % (feature)
for entry in self[feature]:
accum += '%s' % entry
accum += '}'
else: # ERROR
raise Exception, 'feature %s (%s) is not an FStruct, a list, or a tuple' % (feature, self[feature])
return accum+']'
def __str__(self, indent=3):
from nltk.tree import Tree
try:
accum = '%s:[' % self.label
except NameError:
accum = '['
try:
accum += 'pred \'%s\'' % self.pred[0]
except NameError:
pass
for feature in self:
if isinstance(self[feature], FStructure):
next_indent = indent+len(feature)+3+len(self.label)
accum += '\n%s%s %s' % (' '*(indent), feature, self[feature].__str__(next_indent))
elif isinstance(self[feature], tuple):
accum += '\n%s%s \'%s\'' % (' '*(indent), feature, self[feature][0])
elif isinstance(self[feature], list):
accum += '\n%s%s {%s}' % (' '*(indent), feature, ('\n%s' % (' '*(indent+len(feature)+2))).join(self[feature]))
else: # ERROR
raise Exception, 'feature %s is not an FStruct, a list, or a tuple' % feature
return accum+']'
def demo_read_depgraph():
from nltk_contrib.dependency import DepGraph
dg1 = DepGraph().read("""\
Esso NNP 2 SUB
said VBD 0 ROOT
the DT 5 NMOD
Whiting NNP 5 NMOD
field NN 6 SUB
started VBD 2 VMOD
production NN 6 OBJ
Tuesday NNP 6 VMOD
""")
dg2 = DepGraph().read("""\
John NNP 2 SUB
sees VBP 0 ROOT
Mary NNP 2 OBJ
""")
dg3 = DepGraph().read("""\
a DT 2 SPEC
man N 3 SUBJ
walks IV 0 ROOT
""")
dg4 = DepGraph().read("""\
every DT 2 SPEC
girl N 3 SUBJ
chases TV 0 ROOT
a DT 5 SPEC
dog NNP 3 OBJ
""")
# depgraphs = [dg1,dg2,dg3,dg4]
# for dg in depgraphs:
# print FStructure.read_depgraph(dg)
print FStructure.read_depgraph(dg3)
def demo_depparse():
from nltk_contrib.dependency import malt
dg = malt.parse('John sees Mary', True)
print FStructure.read_depgraph(dg)
if __name__ == '__main__':
demo_read_depgraph()
print '\n'
demo_depparse()
| gpl-3.0 |
rickhurst/Django-non-rel-blog | django/contrib/admin/sites.py | 80 | 17398 | import re
from django import http, template
from django.contrib.admin import ModelAdmin, actions
from django.contrib.admin.forms import AdminAuthenticationForm
from django.contrib.auth import REDIRECT_FIELD_NAME
from django.contrib.contenttypes import views as contenttype_views
from django.views.decorators.csrf import csrf_protect
from django.db.models.base import ModelBase
from django.core.exceptions import ImproperlyConfigured
from django.core.urlresolvers import reverse
from django.shortcuts import render_to_response
from django.utils.functional import update_wrapper
from django.utils.safestring import mark_safe
from django.utils.text import capfirst
from django.utils.translation import ugettext as _
from django.views.decorators.cache import never_cache
from django.conf import settings
LOGIN_FORM_KEY = 'this_is_the_login_form'
class AlreadyRegistered(Exception):
pass
class NotRegistered(Exception):
pass
class AdminSite(object):
"""
An AdminSite object encapsulates an instance of the Django admin application, ready
to be hooked in to your URLconf. Models are registered with the AdminSite using the
register() method, and the get_urls() method can then be used to access Django view
functions that present a full admin interface for the collection of registered
models.
"""
login_form = None
index_template = None
app_index_template = None
login_template = None
logout_template = None
password_change_template = None
password_change_done_template = None
def __init__(self, name=None, app_name='admin'):
self._registry = {} # model_class class -> admin_class instance
self.root_path = None
if name is None:
self.name = 'admin'
else:
self.name = name
self.app_name = app_name
self._actions = {'delete_selected': actions.delete_selected}
self._global_actions = self._actions.copy()
def register(self, model_or_iterable, admin_class=None, **options):
"""
Registers the given model(s) with the given admin class.
The model(s) should be Model classes, not instances.
If an admin class isn't given, it will use ModelAdmin (the default
admin options). If keyword arguments are given -- e.g., list_display --
they'll be applied as options to the admin class.
If a model is already registered, this will raise AlreadyRegistered.
If a model is abstract, this will raise ImproperlyConfigured.
"""
if not admin_class:
admin_class = ModelAdmin
# Don't import the humongous validation code unless required
if admin_class and settings.DEBUG:
from django.contrib.admin.validation import validate
else:
validate = lambda model, adminclass: None
if isinstance(model_or_iterable, ModelBase):
model_or_iterable = [model_or_iterable]
for model in model_or_iterable:
if model._meta.abstract:
raise ImproperlyConfigured('The model %s is abstract, so it '
'cannot be registered with admin.' % model.__name__)
if model in self._registry:
raise AlreadyRegistered('The model %s is already registered' % model.__name__)
# If we got **options then dynamically construct a subclass of
# admin_class with those **options.
if options:
# For reasons I don't quite understand, without a __module__
# the created class appears to "live" in the wrong place,
# which causes issues later on.
options['__module__'] = __name__
admin_class = type("%sAdmin" % model.__name__, (admin_class,), options)
# Validate (which might be a no-op)
validate(admin_class, model)
# Instantiate the admin class to save in the registry
self._registry[model] = admin_class(model, self)
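    # Illustrative usage sketch ('Author'/'Book'/'BookAdmin' are hypothetical):
    #
    #     site.register(Author)                          # default ModelAdmin
    #     site.register(Book, BookAdmin)                 # explicit admin class
    #     site.register(Book, list_display=['title'])    # options build a subclass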
def unregister(self, model_or_iterable):
"""
Unregisters the given model(s).
If a model isn't already registered, this will raise NotRegistered.
"""
if isinstance(model_or_iterable, ModelBase):
model_or_iterable = [model_or_iterable]
for model in model_or_iterable:
if model not in self._registry:
raise NotRegistered('The model %s is not registered' % model.__name__)
del self._registry[model]
def add_action(self, action, name=None):
"""
Register an action to be available globally.
"""
name = name or action.__name__
self._actions[name] = action
self._global_actions[name] = action
def disable_action(self, name):
"""
Disable a globally-registered action. Raises KeyError for invalid names.
"""
del self._actions[name]
def get_action(self, name):
"""
        Explicitly get a registered global action whether it's enabled or not.
        Raises KeyError for invalid names.
"""
return self._global_actions[name]
@property
def actions(self):
"""
Get all the enabled actions as an iterable of (name, func).
"""
return self._actions.iteritems()
def has_permission(self, request):
"""
Returns True if the given HttpRequest has permission to view
*at least one* page in the admin site.
"""
return request.user.is_active and request.user.is_staff
def check_dependencies(self):
"""
Check that all things needed to run the admin have been correctly installed.
The default implementation checks that LogEntry, ContentType and the
auth context processor are installed.
"""
from django.contrib.admin.models import LogEntry
from django.contrib.contenttypes.models import ContentType
if not LogEntry._meta.installed:
raise ImproperlyConfigured("Put 'django.contrib.admin' in your "
"INSTALLED_APPS setting in order to use the admin application.")
if not ContentType._meta.installed:
raise ImproperlyConfigured("Put 'django.contrib.contenttypes' in "
"your INSTALLED_APPS setting in order to use the admin application.")
if not ('django.contrib.auth.context_processors.auth' in settings.TEMPLATE_CONTEXT_PROCESSORS or
'django.core.context_processors.auth' in settings.TEMPLATE_CONTEXT_PROCESSORS):
raise ImproperlyConfigured("Put 'django.contrib.auth.context_processors.auth' "
"in your TEMPLATE_CONTEXT_PROCESSORS setting in order to use the admin application.")
def admin_view(self, view, cacheable=False):
"""
Decorator to create an admin view attached to this ``AdminSite``. This
wraps the view and provides permission checking by calling
``self.has_permission``.
You'll want to use this from within ``AdminSite.get_urls()``:
class MyAdminSite(AdminSite):
def get_urls(self):
from django.conf.urls.defaults import patterns, url
urls = super(MyAdminSite, self).get_urls()
urls += patterns('',
url(r'^my_view/$', self.admin_view(some_view))
)
return urls
By default, admin_views are marked non-cacheable using the
``never_cache`` decorator. If the view can be safely cached, set
cacheable=True.
"""
def inner(request, *args, **kwargs):
if not self.has_permission(request):
return self.login(request)
return view(request, *args, **kwargs)
if not cacheable:
inner = never_cache(inner)
# We add csrf_protect here so this function can be used as a utility
# function for any view, without having to repeat 'csrf_protect'.
if not getattr(view, 'csrf_exempt', False):
inner = csrf_protect(inner)
return update_wrapper(inner, view)
def get_urls(self):
from django.conf.urls.defaults import patterns, url, include
if settings.DEBUG:
self.check_dependencies()
def wrap(view, cacheable=False):
def wrapper(*args, **kwargs):
return self.admin_view(view, cacheable)(*args, **kwargs)
return update_wrapper(wrapper, view)
# Admin-site-wide views.
urlpatterns = patterns('',
url(r'^$',
wrap(self.index),
name='index'),
url(r'^logout/$',
wrap(self.logout),
name='logout'),
url(r'^password_change/$',
wrap(self.password_change, cacheable=True),
name='password_change'),
url(r'^password_change/done/$',
wrap(self.password_change_done, cacheable=True),
name='password_change_done'),
url(r'^jsi18n/$',
wrap(self.i18n_javascript, cacheable=True),
name='jsi18n'),
url(r'^r/(?P<content_type_id>[a-z\d]+)/(?P<object_id>.+)/$',
wrap(contenttype_views.shortcut)),
url(r'^(?P<app_label>\w+)/$',
wrap(self.app_index),
name='app_list')
)
# Add in each model's views.
for model, model_admin in self._registry.iteritems():
urlpatterns += patterns('',
url(r'^%s/%s/' % (model._meta.app_label, model._meta.module_name),
include(model_admin.urls))
)
return urlpatterns
@property
def urls(self):
return self.get_urls(), self.app_name, self.name
def password_change(self, request):
"""
Handles the "change password" task -- both form display and validation.
"""
from django.contrib.auth.views import password_change
if self.root_path is not None:
url = '%spassword_change/done/' % self.root_path
else:
url = reverse('admin:password_change_done', current_app=self.name)
defaults = {
'current_app': self.name,
'post_change_redirect': url
}
if self.password_change_template is not None:
defaults['template_name'] = self.password_change_template
return password_change(request, **defaults)
def password_change_done(self, request, extra_context=None):
"""
Displays the "success" page after a password change.
"""
from django.contrib.auth.views import password_change_done
defaults = {
'current_app': self.name,
'extra_context': extra_context or {},
}
if self.password_change_done_template is not None:
defaults['template_name'] = self.password_change_done_template
return password_change_done(request, **defaults)
def i18n_javascript(self, request):
"""
Displays the i18n JavaScript that the Django admin requires.
This takes into account the USE_I18N setting. If it's set to False, the
generated JavaScript will be leaner and faster.
"""
if settings.USE_I18N:
from django.views.i18n import javascript_catalog
else:
from django.views.i18n import null_javascript_catalog as javascript_catalog
return javascript_catalog(request, packages=['django.conf', 'django.contrib.admin'])
@never_cache
def logout(self, request, extra_context=None):
"""
Logs out the user for the given HttpRequest.
This should *not* assume the user is already logged in.
"""
from django.contrib.auth.views import logout
defaults = {
'current_app': self.name,
'extra_context': extra_context or {},
}
if self.logout_template is not None:
defaults['template_name'] = self.logout_template
return logout(request, **defaults)
@never_cache
def login(self, request, extra_context=None):
"""
Displays the login form for the given HttpRequest.
"""
from django.contrib.auth.views import login
context = {
'title': _('Log in'),
'root_path': self.root_path,
'app_path': request.get_full_path(),
REDIRECT_FIELD_NAME: request.get_full_path(),
}
context.update(extra_context or {})
defaults = {
'extra_context': context,
'current_app': self.name,
'authentication_form': self.login_form or AdminAuthenticationForm,
'template_name': self.login_template or 'admin/login.html',
}
return login(request, **defaults)
@never_cache
def index(self, request, extra_context=None):
"""
Displays the main admin index page, which lists all of the installed
apps that have been registered in this site.
"""
app_dict = {}
user = request.user
for model, model_admin in self._registry.items():
app_label = model._meta.app_label
has_module_perms = user.has_module_perms(app_label)
if has_module_perms:
perms = model_admin.get_model_perms(request)
# Check whether user has any perm for this module.
# If so, add the module to the model_list.
if True in perms.values():
model_dict = {
'name': capfirst(model._meta.verbose_name_plural),
'admin_url': mark_safe('%s/%s/' % (app_label, model.__name__.lower())),
'perms': perms,
}
if app_label in app_dict:
app_dict[app_label]['models'].append(model_dict)
else:
app_dict[app_label] = {
'name': app_label.title(),
'app_url': app_label + '/',
'has_module_perms': has_module_perms,
'models': [model_dict],
}
# Sort the apps alphabetically.
app_list = app_dict.values()
app_list.sort(key=lambda x: x['name'])
# Sort the models alphabetically within each app.
for app in app_list:
app['models'].sort(key=lambda x: x['name'])
context = {
'title': _('Site administration'),
'app_list': app_list,
'root_path': self.root_path,
}
context.update(extra_context or {})
context_instance = template.RequestContext(request, current_app=self.name)
return render_to_response(self.index_template or 'admin/index.html', context,
context_instance=context_instance
)
def app_index(self, request, app_label, extra_context=None):
user = request.user
has_module_perms = user.has_module_perms(app_label)
app_dict = {}
for model, model_admin in self._registry.items():
if app_label == model._meta.app_label:
if has_module_perms:
perms = model_admin.get_model_perms(request)
# Check whether user has any perm for this module.
# If so, add the module to the model_list.
if True in perms.values():
model_dict = {
'name': capfirst(model._meta.verbose_name_plural),
'admin_url': '%s/' % model.__name__.lower(),
'perms': perms,
}
if app_dict:
                            app_dict['models'].append(model_dict)
else:
# First time around, now that we know there's
# something to display, add in the necessary meta
# information.
app_dict = {
'name': app_label.title(),
'app_url': '',
'has_module_perms': has_module_perms,
'models': [model_dict],
}
if not app_dict:
raise http.Http404('The requested admin page does not exist.')
# Sort the models alphabetically within each app.
app_dict['models'].sort(key=lambda x: x['name'])
context = {
'title': _('%s administration') % capfirst(app_label),
'app_list': [app_dict],
'root_path': self.root_path,
}
context.update(extra_context or {})
context_instance = template.RequestContext(request, current_app=self.name)
return render_to_response(self.app_index_template or ('admin/%s/app_index.html' % app_label,
'admin/app_index.html'), context,
context_instance=context_instance
)
# This global object represents the default admin site, for the common case.
# You can instantiate AdminSite in your own code to create a custom admin site.
site = AdminSite()
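# A minimal sketch (hypothetical project code, not part of this module) of
# wiring a custom AdminSite into a URLconf; MyModel/MyModelAdmin are
# placeholders, and the exact hook-up (site.root vs. site.urls) depends on
# the Django version in use:
#
#     from django.contrib.admin.sites import AdminSite
#     my_site = AdminSite(name='myadmin')
#     my_site.register(MyModel, MyModelAdmin)
#     # then route r'^myadmin/' to my_site in the project urls.py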
| bsd-3-clause |
tiffanyj41/hermes | src/data_prep/movieLens_vectorize.py | 3 | 6265 | import numpy as np
class movieLens_vectorize():
def __init__(self, user_interactions, content, user_vector_type, content_vector_type, sqlCtx, **support_files ):
"""
Class initializer to load the required files
Args:
user_interactions: The raw RDD of the user interactions. For MovieLens, these are the ratings
content: The raw RDD containing the item content. For MovieLens, this is the movie categories
user_vector_type: The type of user vector desired. For MovieLens you can choose between ['ratings', 'pos_ratings', 'ratings_to_interact', 'none'].
If 'none' is used then this means you will run your own custom mapping
content_vector_type: The type of content vector desired. For MovieLens you can choose between ['genre', 'tags', 'none'].
If none is chosen no content vector will be returned and None may be passed into the content argument.
You do not need a content vector to run pure CF only but some performance metrics will not be able to be ran
support_files: If they exist, the supporting files, dataFrames, and/or file links necessary to run the content vectors.
To generate a content vector on tags, you must pass in the tag RDD as support_files['tag_rdd']
You may also pass in the number of tags to utilize as support_files['num_tags']. Otherwise default is set to 300
"""
self.user_vector_type = user_vector_type
self.content_vector_type = content_vector_type
        #Filter out uninteresting items and users if they still exist in the dataset
        self.user_interactions = user_interactions
self.user_interactions.registerTempTable("ratings")
self.content = content
self.content.registerTempTable("content")
self.sqlCtx = sqlCtx
#if no support files were passed in, initialize an empty support file
if support_files:
self.support_files = support_files
else:
self.support_files = {}
def get_user_vector(self):
if self.user_vector_type=='ratings':
user_info = self.user_interactions.map(lambda row: (row.user_id, row.movie_id, row.rating) )
return user_info
elif self.user_vector_type=='pos_ratings':
user_info = self.user_interactions.map(lambda row: (row.user_id, row.movie_id, row.rating) ).filter(lambda (u,m,r): r>3)
return user_info
elif self.user_vector_type=='ratings_to_interact':
user_info = self.user_interactions.map(lambda row: (row.user_id, row.movie_id, rating_to_interaction(row.rating)) )
return user_info
elif self.user_vector_type=='none':
return None
else:
print "Please choose a user_vector_type between 'ratings', 'pos_ratings', 'ratings_to_interact', and 'none'"
return None
def get_content_vector(self):
if self.content_vector_type=='genre':
content_array = self.content.map(lambda row: (row.movie_id, genre_vectorizer(row)))
return content_array
elif self.content_vector_type=='tags':
if "tag_rdd" in self.support_files:
tag_rdd = self.support_files['tag_rdd']
tag_rdd.registerTempTable('tags')
            #having the user pass in the number of tags is optional. Default is set to 300
if "num_tags" in self.support_files:
num_tags = self.support_files['num_tags']
else:
num_tags = 300
#get the top tags based on frequency
tag_freq = self.sqlCtx.sql("select tag, count(*) as t_count from tags group by tag").collect()
top_tags = sorted(tag_freq, key=lambda x: x[1], reverse=True)[:num_tags]
#get the top tags into a pretty list
tag_list = []
for elem in top_tags:
tag_list.append(elem[0])
            tag_content = tag_rdd.map(lambda row: (row.movie_id, row.tag)).groupByKey().map(lambda row: get_tag_vect(row, tag_list))
            #join the tags with the genre content, since more items have genre data than tags
content_array = self.content.map(lambda row: (row.movie_id, genre_vectorizer(row)))
#join the tag content and the content_array
joined_content = content_array.leftOuterJoin(tag_content)\
.map(lambda (mid, (genre, tags)): (mid, list(genre)+tag_vect(tags, num_tags)))
return joined_content
else:
print "Please pass in a tag RDD. Like: support_files['tag_rdd'] = sqlCtx.read.json('movielens_20m_tags.json.gz')"
elif self.content_vector_type=='none':
return None
else:
print "Please choose a content_vector_type between 'genre', 'tags', or 'none'"
return None
def rating_to_interaction(rating):
if rating<3:
return -1
else:
return 1
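# For example (hypothetical values): rating_to_interaction(2) == -1 and
# rating_to_interaction(4) == 1, so 1- and 2-star ratings become negative
# interactions while 3 stars and above become positive ones.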
def get_tag_vect(row, keeper_tags):
tag_vect = np.zeros(len(keeper_tags))
for tag in row[1]:
try:
index = keeper_tags.index(tag)
tag_vect[index] = 1
except:
pass
return (row[0], tag_vect)
def tag_vect(tags, num_tags):
if tags is None:
return list(np.zeros(num_tags))
else:
return list(tags)
def genre_vectorizer(row):
return np.array((
int(row.genre_action),
int(row.genre_adventure),
int(row.genre_animation),
int(row.genre_childrens),
int(row.genre_comedy),
int(row.genre_crime),
int(row.genre_documentary),
int(row.genre_drama),
int(row.genre_fantasy),
int(row.genre_filmnoir),
int(row.genre_horror),
int(row.genre_musical),
int(row.genre_mystery),
int(row.genre_romance),
int(row.genre_scifi),
int(row.genre_thriller),
int(row.genre_war),
int(row.genre_western),
)) | apache-2.0 |
EliasVansteenkiste/ConnectionRouter | vtr_flow/scripts/benchtracker/util.py | 2 | 1795 | from __future__ import print_function, division
import re
import sys
import os.path
import socket
import getpass
# working on the task directory
def sort_runs(runs):
natural_sort(runs)
def walk_runs(params, operation, select=sort_runs):
"""walk the selected run# directories and apply operation on each"""
runs = [run for run in immediate_subdir(params.task_dir) if run.startswith(params.run_prefix)]
# select how to and which runs to use for a certain range
select(runs)
if not runs:
print("no {}s in {}".format(params.run_prefix, params.task_dir))
sys.exit(1)
for run in runs:
operation(params, run)
def natural_sort(l):
convert = lambda text: int(text) if text.isdigit() else text.lower()
alphanum_key = lambda key: [ convert(c) for c in re.split('([0-9]+)', key) ]
l.sort(key=alphanum_key)
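# For example, a hypothetical list of run directories sorts numerically
# rather than lexically:
#
#     runs = ['run10', 'run2', 'run1']
#     natural_sort(runs)
#     # runs == ['run1', 'run2', 'run10']
#     # (a plain runs.sort() would give ['run1', 'run10', 'run2'])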
def get_result_file(params, run):
return os.path.join(params.task_dir, run, params.result_file)
def get_task_table_name(params):
return '[' + "|".join([params.task_name, socket.gethostname(), getpass.getuser(), params.task_path]) + ']'
def immediate_subdir(root):
return [name for name in os.listdir(root) if os.path.isdir(os.path.join(root, name))]
def get_trailing_num(s):
match = re.search(r'\d+$', s)
return int(match.group()) if match else None
def is_type_of(s, convert):
try:
convert(s)
return True
except ValueError:
return False
def is_int(s):
return is_type_of(s, int)
def is_float(s):
return is_type_of(s, float)
# try converting to numerical types using the strictest converters first
def convert_strictest(s):
try:
return int(s)
except ValueError:
try:
return float(s)
except ValueError:
return s
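# For example (hypothetical inputs): convert_strictest('3') == 3,
# convert_strictest('3.5') == 3.5, and convert_strictest('abc') == 'abc' --
# int is tried first, then float, then the string is returned unchanged.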
| mit |
cgstudiomap/cgstudiomap | main/parts/odoo/openerp/addons/test_documentation_examples/tests/test_delegation.py | 366 | 1299 | # -*- coding: utf-8 -*-
from openerp.tests import common
class TestDelegation(common.TransactionCase):
def setUp(self):
super(TestDelegation, self).setUp()
env = self.env
record = env['delegation.parent'].create({
'child0_id': env['delegation.child0'].create({'field_0': 0}).id,
'child1_id': env['delegation.child1'].create({'field_1': 1}).id,
})
self.record = record
def test_delegating_record(self):
env = self.env
record = self.record
# children fields can be looked up on the parent record directly
self.assertEqual(
record.field_0
,
0
)
self.assertEqual(
record.field_1
,
1
)
def test_swap_child(self):
env = self.env
record = self.record
record.write({
'child0_id': env['delegation.child0'].create({'field_0': 42}).id
})
self.assertEqual(
record.field_0
,
42
)
def test_write(self):
record = self.record
record.write({'field_1': 4})
self.assertEqual(
record.field_1
,
4
)
self.assertEqual(
record.child1_id.field_1
,
4
)
| agpl-3.0 |
Pathel/deuterium | src/tests/test_comms_comms.py | 2 | 3446 | import unittest
class TestChannel(unittest.TestCase):
def test_at_channel_create(self):
# channel = Channel()
# self.assertEqual(expected, channel.at_channel_create())
assert True # TODO: implement your test here
def test_at_init(self):
# channel = Channel()
# self.assertEqual(expected, channel.at_init())
assert True # TODO: implement your test here
def test_channel_prefix(self):
# channel = Channel()
# self.assertEqual(expected, channel.channel_prefix(msg, emit))
assert True # TODO: implement your test here
def test_distribute_message(self):
# channel = Channel()
# self.assertEqual(expected, channel.distribute_message(msg, online))
assert True # TODO: implement your test here
def test_format_external(self):
# channel = Channel()
# self.assertEqual(expected, channel.format_external(msg, senders, emit))
assert True # TODO: implement your test here
def test_format_message(self):
# channel = Channel()
# self.assertEqual(expected, channel.format_message(msg, emit))
assert True # TODO: implement your test here
def test_format_senders(self):
# channel = Channel()
# self.assertEqual(expected, channel.format_senders(senders))
assert True # TODO: implement your test here
def test_message_transform(self):
# channel = Channel()
# self.assertEqual(expected, channel.message_transform(msg, emit, prefix, sender_strings, external))
assert True # TODO: implement your test here
def test_msg(self):
# channel = Channel()
# self.assertEqual(expected, channel.msg(msgobj, header, senders, sender_strings, persistent, online, emit, external))
assert True # TODO: implement your test here
def test_pose_transform(self):
# channel = Channel()
# self.assertEqual(expected, channel.pose_transform(msg, sender_string))
assert True # TODO: implement your test here
def test_post_join_channel(self):
# channel = Channel()
# self.assertEqual(expected, channel.post_join_channel(joiner))
assert True # TODO: implement your test here
def test_post_leave_channel(self):
# channel = Channel()
# self.assertEqual(expected, channel.post_leave_channel(leaver))
assert True # TODO: implement your test here
def test_post_send_message(self):
# channel = Channel()
# self.assertEqual(expected, channel.post_send_message(msg))
assert True # TODO: implement your test here
def test_pre_join_channel(self):
# channel = Channel()
# self.assertEqual(expected, channel.pre_join_channel(joiner))
assert True # TODO: implement your test here
def test_pre_leave_channel(self):
# channel = Channel()
# self.assertEqual(expected, channel.pre_leave_channel(leaver))
assert True # TODO: implement your test here
def test_pre_send_message(self):
# channel = Channel()
# self.assertEqual(expected, channel.pre_send_message(msg))
assert True # TODO: implement your test here
def test_tempmsg(self):
# channel = Channel()
# self.assertEqual(expected, channel.tempmsg(message, header, senders))
assert True # TODO: implement your test here
if __name__ == '__main__':
unittest.main()
| bsd-3-clause |
theheros/kbengine | kbe/res/scripts/common/Lib/test/test_shutil.py | 3 | 36253 | # Copyright (C) 2003 Python Software Foundation
import unittest
import shutil
import tempfile
import sys
import stat
import os
import os.path
import functools
from test import support
from test.support import TESTFN
from os.path import splitdrive
from distutils.spawn import find_executable, spawn
from shutil import (_make_tarball, _make_zipfile, make_archive,
register_archive_format, unregister_archive_format,
get_archive_formats, Error, unpack_archive,
register_unpack_format, RegistryError,
unregister_unpack_format, get_unpack_formats)
import tarfile
import warnings
from test.support import check_warnings, captured_stdout
try:
import bz2
BZ2_SUPPORTED = True
except ImportError:
BZ2_SUPPORTED = False
TESTFN2 = TESTFN + "2"
try:
import grp
import pwd
UID_GID_SUPPORT = True
except ImportError:
UID_GID_SUPPORT = False
try:
import zlib
except ImportError:
zlib = None
try:
import zipfile
ZIP_SUPPORT = True
except ImportError:
ZIP_SUPPORT = find_executable('zip')
def _fake_rename(*args, **kwargs):
# Pretend the destination path is on a different filesystem.
raise OSError()
def mock_rename(func):
@functools.wraps(func)
def wrap(*args, **kwargs):
try:
builtin_rename = os.rename
os.rename = _fake_rename
return func(*args, **kwargs)
finally:
os.rename = builtin_rename
return wrap
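# Applying @mock_rename to a test forces shutil.move() down its
# copy-then-delete fallback path, since every os.rename() call fails; this
# is how the *_other_fs tests below simulate moves across filesystems.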
class TestShutil(unittest.TestCase):
def setUp(self):
super(TestShutil, self).setUp()
self.tempdirs = []
def tearDown(self):
super(TestShutil, self).tearDown()
while self.tempdirs:
d = self.tempdirs.pop()
shutil.rmtree(d, os.name in ('nt', 'cygwin'))
def write_file(self, path, content='xxx'):
"""Writes a file in the given path.
path can be a string or a sequence.
"""
if isinstance(path, (list, tuple)):
path = os.path.join(*path)
f = open(path, 'w')
try:
f.write(content)
finally:
f.close()
def mkdtemp(self):
"""Create a temporary directory that will be cleaned up.
Returns the path of the directory.
"""
d = tempfile.mkdtemp()
self.tempdirs.append(d)
return d
def test_rmtree_errors(self):
# filename is guaranteed not to exist
filename = tempfile.mktemp()
self.assertRaises(OSError, shutil.rmtree, filename)
# See bug #1071513 for why we don't run this on cygwin
# and bug #1076467 for why we don't run this as root.
if (hasattr(os, 'chmod') and sys.platform[:6] != 'cygwin'
and not (hasattr(os, 'geteuid') and os.geteuid() == 0)):
def test_on_error(self):
self.errorState = 0
os.mkdir(TESTFN)
self.childpath = os.path.join(TESTFN, 'a')
f = open(self.childpath, 'w')
f.close()
old_dir_mode = os.stat(TESTFN).st_mode
old_child_mode = os.stat(self.childpath).st_mode
# Make unwritable.
os.chmod(self.childpath, stat.S_IREAD)
os.chmod(TESTFN, stat.S_IREAD)
shutil.rmtree(TESTFN, onerror=self.check_args_to_onerror)
# Test whether onerror has actually been called.
self.assertEqual(self.errorState, 2,
"Expected call to onerror function did not happen.")
# Make writable again.
os.chmod(TESTFN, old_dir_mode)
os.chmod(self.childpath, old_child_mode)
# Clean up.
shutil.rmtree(TESTFN)
def check_args_to_onerror(self, func, arg, exc):
# test_rmtree_errors deliberately runs rmtree
# on a directory that is chmod 400, which will fail.
# This function is run when shutil.rmtree fails.
# 99.9% of the time it initially fails to remove
# a file in the directory, so the first time through
# func is os.remove.
# However, some Linux machines running ZFS on
# FUSE experienced a failure earlier in the process
# at os.listdir. The first failure may legally
# be either.
if self.errorState == 0:
if func is os.remove:
self.assertEqual(arg, self.childpath)
else:
self.assertIs(func, os.listdir,
"func must be either os.remove or os.listdir")
self.assertEqual(arg, TESTFN)
self.assertTrue(issubclass(exc[0], OSError))
self.errorState = 1
else:
self.assertEqual(func, os.rmdir)
self.assertEqual(arg, TESTFN)
self.assertTrue(issubclass(exc[0], OSError))
self.errorState = 2
def test_rmtree_dont_delete_file(self):
# When called on a file instead of a directory, don't delete it.
handle, path = tempfile.mkstemp()
os.fdopen(handle).close()
self.assertRaises(OSError, shutil.rmtree, path)
os.remove(path)
def _write_data(self, path, data):
f = open(path, "w")
f.write(data)
f.close()
def test_copytree_simple(self):
def read_data(path):
f = open(path)
data = f.read()
f.close()
return data
src_dir = tempfile.mkdtemp()
dst_dir = os.path.join(tempfile.mkdtemp(), 'destination')
self._write_data(os.path.join(src_dir, 'test.txt'), '123')
os.mkdir(os.path.join(src_dir, 'test_dir'))
self._write_data(os.path.join(src_dir, 'test_dir', 'test.txt'), '456')
try:
shutil.copytree(src_dir, dst_dir)
self.assertTrue(os.path.isfile(os.path.join(dst_dir, 'test.txt')))
self.assertTrue(os.path.isdir(os.path.join(dst_dir, 'test_dir')))
self.assertTrue(os.path.isfile(os.path.join(dst_dir, 'test_dir',
'test.txt')))
actual = read_data(os.path.join(dst_dir, 'test.txt'))
self.assertEqual(actual, '123')
actual = read_data(os.path.join(dst_dir, 'test_dir', 'test.txt'))
self.assertEqual(actual, '456')
finally:
for path in (
os.path.join(src_dir, 'test.txt'),
os.path.join(dst_dir, 'test.txt'),
os.path.join(src_dir, 'test_dir', 'test.txt'),
os.path.join(dst_dir, 'test_dir', 'test.txt'),
):
if os.path.exists(path):
os.remove(path)
for path in (src_dir,
os.path.dirname(dst_dir)
):
if os.path.exists(path):
shutil.rmtree(path)
def test_copytree_with_exclude(self):
def read_data(path):
f = open(path)
data = f.read()
f.close()
return data
# creating data
join = os.path.join
exists = os.path.exists
src_dir = tempfile.mkdtemp()
try:
dst_dir = join(tempfile.mkdtemp(), 'destination')
self._write_data(join(src_dir, 'test.txt'), '123')
self._write_data(join(src_dir, 'test.tmp'), '123')
os.mkdir(join(src_dir, 'test_dir'))
self._write_data(join(src_dir, 'test_dir', 'test.txt'), '456')
os.mkdir(join(src_dir, 'test_dir2'))
self._write_data(join(src_dir, 'test_dir2', 'test.txt'), '456')
os.mkdir(join(src_dir, 'test_dir2', 'subdir'))
os.mkdir(join(src_dir, 'test_dir2', 'subdir2'))
self._write_data(join(src_dir, 'test_dir2', 'subdir', 'test.txt'),
'456')
self._write_data(join(src_dir, 'test_dir2', 'subdir2', 'test.py'),
'456')
# testing glob-like patterns
try:
patterns = shutil.ignore_patterns('*.tmp', 'test_dir2')
shutil.copytree(src_dir, dst_dir, ignore=patterns)
# checking the result: some elements should not be copied
self.assertTrue(exists(join(dst_dir, 'test.txt')))
self.assertTrue(not exists(join(dst_dir, 'test.tmp')))
self.assertTrue(not exists(join(dst_dir, 'test_dir2')))
finally:
if os.path.exists(dst_dir):
shutil.rmtree(dst_dir)
try:
patterns = shutil.ignore_patterns('*.tmp', 'subdir*')
shutil.copytree(src_dir, dst_dir, ignore=patterns)
# checking the result: some elements should not be copied
self.assertTrue(not exists(join(dst_dir, 'test.tmp')))
self.assertTrue(not exists(join(dst_dir, 'test_dir2', 'subdir2')))
self.assertTrue(not exists(join(dst_dir, 'test_dir2', 'subdir')))
finally:
if os.path.exists(dst_dir):
shutil.rmtree(dst_dir)
# testing callable-style
try:
def _filter(src, names):
res = []
for name in names:
path = os.path.join(src, name)
if (os.path.isdir(path) and
path.split()[-1] == 'subdir'):
res.append(name)
                    elif os.path.splitext(path)[-1] in ('.py',):
res.append(name)
return res
shutil.copytree(src_dir, dst_dir, ignore=_filter)
# checking the result: some elements should not be copied
self.assertTrue(not exists(join(dst_dir, 'test_dir2', 'subdir2',
'test.py')))
self.assertTrue(not exists(join(dst_dir, 'test_dir2', 'subdir')))
finally:
if os.path.exists(dst_dir):
shutil.rmtree(dst_dir)
finally:
shutil.rmtree(src_dir)
shutil.rmtree(os.path.dirname(dst_dir))
@unittest.skipUnless(hasattr(os, 'link'), 'requires os.link')
def test_dont_copy_file_onto_link_to_itself(self):
# Temporarily disable test on Windows.
if os.name == 'nt':
return
# bug 851123.
os.mkdir(TESTFN)
src = os.path.join(TESTFN, 'cheese')
dst = os.path.join(TESTFN, 'shop')
try:
with open(src, 'w') as f:
f.write('cheddar')
os.link(src, dst)
self.assertRaises(shutil.Error, shutil.copyfile, src, dst)
with open(src, 'r') as f:
self.assertEqual(f.read(), 'cheddar')
os.remove(dst)
finally:
shutil.rmtree(TESTFN, ignore_errors=True)
@support.skip_unless_symlink
def test_dont_copy_file_onto_symlink_to_itself(self):
# bug 851123.
os.mkdir(TESTFN)
src = os.path.join(TESTFN, 'cheese')
dst = os.path.join(TESTFN, 'shop')
try:
with open(src, 'w') as f:
f.write('cheddar')
# Using `src` here would mean we end up with a symlink pointing
# to TESTFN/TESTFN/cheese, while it should point at
# TESTFN/cheese.
os.symlink('cheese', dst)
self.assertRaises(shutil.Error, shutil.copyfile, src, dst)
with open(src, 'r') as f:
self.assertEqual(f.read(), 'cheddar')
os.remove(dst)
finally:
shutil.rmtree(TESTFN, ignore_errors=True)
@support.skip_unless_symlink
def test_rmtree_on_symlink(self):
# bug 1669.
os.mkdir(TESTFN)
try:
src = os.path.join(TESTFN, 'cheese')
dst = os.path.join(TESTFN, 'shop')
os.mkdir(src)
os.symlink(src, dst)
self.assertRaises(OSError, shutil.rmtree, dst)
finally:
shutil.rmtree(TESTFN, ignore_errors=True)
if hasattr(os, "mkfifo"):
# Issue #3002: copyfile and copytree block indefinitely on named pipes
def test_copyfile_named_pipe(self):
os.mkfifo(TESTFN)
try:
self.assertRaises(shutil.SpecialFileError,
shutil.copyfile, TESTFN, TESTFN2)
self.assertRaises(shutil.SpecialFileError,
shutil.copyfile, __file__, TESTFN)
finally:
os.remove(TESTFN)
@support.skip_unless_symlink
def test_copytree_named_pipe(self):
os.mkdir(TESTFN)
try:
subdir = os.path.join(TESTFN, "subdir")
os.mkdir(subdir)
pipe = os.path.join(subdir, "mypipe")
os.mkfifo(pipe)
try:
shutil.copytree(TESTFN, TESTFN2)
except shutil.Error as e:
errors = e.args[0]
self.assertEqual(len(errors), 1)
src, dst, error_msg = errors[0]
self.assertEqual("`%s` is a named pipe" % pipe, error_msg)
else:
self.fail("shutil.Error should have been raised")
finally:
shutil.rmtree(TESTFN, ignore_errors=True)
shutil.rmtree(TESTFN2, ignore_errors=True)
def test_copytree_special_func(self):
src_dir = self.mkdtemp()
dst_dir = os.path.join(self.mkdtemp(), 'destination')
self._write_data(os.path.join(src_dir, 'test.txt'), '123')
os.mkdir(os.path.join(src_dir, 'test_dir'))
self._write_data(os.path.join(src_dir, 'test_dir', 'test.txt'), '456')
copied = []
def _copy(src, dst):
copied.append((src, dst))
shutil.copytree(src_dir, dst_dir, copy_function=_copy)
self.assertEqual(len(copied), 2)
@support.skip_unless_symlink
def test_copytree_dangling_symlinks(self):
# a dangling symlink raises an error at the end
src_dir = self.mkdtemp()
dst_dir = os.path.join(self.mkdtemp(), 'destination')
os.symlink('IDONTEXIST', os.path.join(src_dir, 'test.txt'))
os.mkdir(os.path.join(src_dir, 'test_dir'))
self._write_data(os.path.join(src_dir, 'test_dir', 'test.txt'), '456')
self.assertRaises(Error, shutil.copytree, src_dir, dst_dir)
# a dangling symlink is ignored with the proper flag
dst_dir = os.path.join(self.mkdtemp(), 'destination2')
shutil.copytree(src_dir, dst_dir, ignore_dangling_symlinks=True)
self.assertNotIn('test.txt', os.listdir(dst_dir))
# a dangling symlink is copied if symlinks=True
dst_dir = os.path.join(self.mkdtemp(), 'destination3')
shutil.copytree(src_dir, dst_dir, symlinks=True)
self.assertIn('test.txt', os.listdir(dst_dir))
def _copy_file(self, method):
fname = 'test.txt'
tmpdir = self.mkdtemp()
self.write_file([tmpdir, fname])
file1 = os.path.join(tmpdir, fname)
tmpdir2 = self.mkdtemp()
method(file1, tmpdir2)
file2 = os.path.join(tmpdir2, fname)
return (file1, file2)
@unittest.skipUnless(hasattr(os, 'chmod'), 'requires os.chmod')
def test_copy(self):
# Ensure that the copied file exists and has the same mode bits.
file1, file2 = self._copy_file(shutil.copy)
self.assertTrue(os.path.exists(file2))
self.assertEqual(os.stat(file1).st_mode, os.stat(file2).st_mode)
@unittest.skipUnless(hasattr(os, 'chmod'), 'requires os.chmod')
@unittest.skipUnless(hasattr(os, 'utime'), 'requires os.utime')
def test_copy2(self):
# Ensure that the copied file exists and has the same mode and
# modification time bits.
file1, file2 = self._copy_file(shutil.copy2)
self.assertTrue(os.path.exists(file2))
file1_stat = os.stat(file1)
file2_stat = os.stat(file2)
self.assertEqual(file1_stat.st_mode, file2_stat.st_mode)
for attr in 'st_atime', 'st_mtime':
# The modification times may be truncated in the new file.
self.assertLessEqual(getattr(file1_stat, attr),
getattr(file2_stat, attr) + 1)
if hasattr(os, 'chflags') and hasattr(file1_stat, 'st_flags'):
self.assertEqual(getattr(file1_stat, 'st_flags'),
getattr(file2_stat, 'st_flags'))
@unittest.skipUnless(zlib, "requires zlib")
def test_make_tarball(self):
# creating something to tar
tmpdir = self.mkdtemp()
self.write_file([tmpdir, 'file1'], 'xxx')
self.write_file([tmpdir, 'file2'], 'xxx')
os.mkdir(os.path.join(tmpdir, 'sub'))
self.write_file([tmpdir, 'sub', 'file3'], 'xxx')
tmpdir2 = self.mkdtemp()
# force shutil to create the directory
os.rmdir(tmpdir2)
        if splitdrive(tmpdir)[0] != splitdrive(tmpdir2)[0]:
            self.skipTest("source and target should be on same drive")
base_name = os.path.join(tmpdir2, 'archive')
# working with relative paths to avoid tar warnings
old_dir = os.getcwd()
os.chdir(tmpdir)
try:
_make_tarball(splitdrive(base_name)[1], '.')
finally:
os.chdir(old_dir)
# check if the compressed tarball was created
tarball = base_name + '.tar.gz'
self.assertTrue(os.path.exists(tarball))
# trying an uncompressed one
base_name = os.path.join(tmpdir2, 'archive')
old_dir = os.getcwd()
os.chdir(tmpdir)
try:
_make_tarball(splitdrive(base_name)[1], '.', compress=None)
finally:
os.chdir(old_dir)
tarball = base_name + '.tar'
self.assertTrue(os.path.exists(tarball))
def _tarinfo(self, path):
tar = tarfile.open(path)
try:
names = tar.getnames()
names.sort()
return tuple(names)
finally:
tar.close()
def _create_files(self):
# creating something to tar
tmpdir = self.mkdtemp()
dist = os.path.join(tmpdir, 'dist')
os.mkdir(dist)
self.write_file([dist, 'file1'], 'xxx')
self.write_file([dist, 'file2'], 'xxx')
os.mkdir(os.path.join(dist, 'sub'))
self.write_file([dist, 'sub', 'file3'], 'xxx')
os.mkdir(os.path.join(dist, 'sub2'))
tmpdir2 = self.mkdtemp()
base_name = os.path.join(tmpdir2, 'archive')
return tmpdir, tmpdir2, base_name
@unittest.skipUnless(zlib, "Requires zlib")
@unittest.skipUnless(find_executable('tar') and find_executable('gzip'),
'Need the tar command to run')
def test_tarfile_vs_tar(self):
tmpdir, tmpdir2, base_name = self._create_files()
old_dir = os.getcwd()
os.chdir(tmpdir)
try:
_make_tarball(base_name, 'dist')
finally:
os.chdir(old_dir)
# check if the compressed tarball was created
tarball = base_name + '.tar.gz'
self.assertTrue(os.path.exists(tarball))
# now create another tarball using `tar`
tarball2 = os.path.join(tmpdir, 'archive2.tar.gz')
tar_cmd = ['tar', '-cf', 'archive2.tar', 'dist']
gzip_cmd = ['gzip', '-f9', 'archive2.tar']
old_dir = os.getcwd()
os.chdir(tmpdir)
try:
with captured_stdout() as s:
spawn(tar_cmd)
spawn(gzip_cmd)
finally:
os.chdir(old_dir)
self.assertTrue(os.path.exists(tarball2))
# let's compare both tarballs
self.assertEqual(self._tarinfo(tarball), self._tarinfo(tarball2))
# trying an uncompressed one
base_name = os.path.join(tmpdir2, 'archive')
old_dir = os.getcwd()
os.chdir(tmpdir)
try:
_make_tarball(base_name, 'dist', compress=None)
finally:
os.chdir(old_dir)
tarball = base_name + '.tar'
self.assertTrue(os.path.exists(tarball))
# now for a dry_run
base_name = os.path.join(tmpdir2, 'archive')
old_dir = os.getcwd()
os.chdir(tmpdir)
try:
_make_tarball(base_name, 'dist', compress=None, dry_run=True)
finally:
os.chdir(old_dir)
tarball = base_name + '.tar'
self.assertTrue(os.path.exists(tarball))
@unittest.skipUnless(zlib, "Requires zlib")
@unittest.skipUnless(ZIP_SUPPORT, 'Need zip support to run')
def test_make_zipfile(self):
# creating something to tar
tmpdir = self.mkdtemp()
self.write_file([tmpdir, 'file1'], 'xxx')
self.write_file([tmpdir, 'file2'], 'xxx')
tmpdir2 = self.mkdtemp()
# force shutil to create the directory
os.rmdir(tmpdir2)
base_name = os.path.join(tmpdir2, 'archive')
_make_zipfile(base_name, tmpdir)
# check if the compressed tarball was created
tarball = base_name + '.zip'
self.assertTrue(os.path.exists(tarball))
def test_make_archive(self):
tmpdir = self.mkdtemp()
base_name = os.path.join(tmpdir, 'archive')
self.assertRaises(ValueError, make_archive, base_name, 'xxx')
@unittest.skipUnless(zlib, "Requires zlib")
def test_make_archive_owner_group(self):
# testing make_archive with owner and group, with various combinations
# this works even if there's not gid/uid support
if UID_GID_SUPPORT:
group = grp.getgrgid(0)[0]
owner = pwd.getpwuid(0)[0]
else:
group = owner = 'root'
base_dir, root_dir, base_name = self._create_files()
base_name = os.path.join(self.mkdtemp() , 'archive')
res = make_archive(base_name, 'zip', root_dir, base_dir, owner=owner,
group=group)
self.assertTrue(os.path.exists(res))
res = make_archive(base_name, 'zip', root_dir, base_dir)
self.assertTrue(os.path.exists(res))
res = make_archive(base_name, 'tar', root_dir, base_dir,
owner=owner, group=group)
self.assertTrue(os.path.exists(res))
res = make_archive(base_name, 'tar', root_dir, base_dir,
owner='kjhkjhkjg', group='oihohoh')
self.assertTrue(os.path.exists(res))
@unittest.skipUnless(zlib, "Requires zlib")
@unittest.skipUnless(UID_GID_SUPPORT, "Requires grp and pwd support")
def test_tarfile_root_owner(self):
tmpdir, tmpdir2, base_name = self._create_files()
old_dir = os.getcwd()
os.chdir(tmpdir)
group = grp.getgrgid(0)[0]
owner = pwd.getpwuid(0)[0]
try:
archive_name = _make_tarball(base_name, 'dist', compress=None,
owner=owner, group=group)
finally:
os.chdir(old_dir)
# check if the compressed tarball was created
self.assertTrue(os.path.exists(archive_name))
# now checks the rights
archive = tarfile.open(archive_name)
try:
for member in archive.getmembers():
self.assertEqual(member.uid, 0)
self.assertEqual(member.gid, 0)
finally:
archive.close()
def test_make_archive_cwd(self):
current_dir = os.getcwd()
def _breaks(*args, **kw):
raise RuntimeError()
register_archive_format('xxx', _breaks, [], 'xxx file')
try:
try:
make_archive('xxx', 'xxx', root_dir=self.mkdtemp())
except Exception:
pass
self.assertEqual(os.getcwd(), current_dir)
finally:
unregister_archive_format('xxx')
def test_register_archive_format(self):
self.assertRaises(TypeError, register_archive_format, 'xxx', 1)
self.assertRaises(TypeError, register_archive_format, 'xxx', lambda: x,
1)
self.assertRaises(TypeError, register_archive_format, 'xxx', lambda: x,
[(1, 2), (1, 2, 3)])
register_archive_format('xxx', lambda: x, [(1, 2)], 'xxx file')
formats = [name for name, params in get_archive_formats()]
self.assertIn('xxx', formats)
unregister_archive_format('xxx')
formats = [name for name, params in get_archive_formats()]
self.assertNotIn('xxx', formats)
def _compare_dirs(self, dir1, dir2):
# check that dir1 and dir2 are equivalent,
# return the diff
diff = []
for root, dirs, files in os.walk(dir1):
for file_ in files:
path = os.path.join(root, file_)
target_path = os.path.join(dir2, os.path.split(path)[-1])
if not os.path.exists(target_path):
diff.append(file_)
return diff
@unittest.skipUnless(zlib, "Requires zlib")
def test_unpack_archive(self):
formats = ['tar', 'gztar', 'zip']
if BZ2_SUPPORTED:
formats.append('bztar')
for format in formats:
tmpdir = self.mkdtemp()
base_dir, root_dir, base_name = self._create_files()
tmpdir2 = self.mkdtemp()
filename = make_archive(base_name, format, root_dir, base_dir)
# let's try to unpack it now
unpack_archive(filename, tmpdir2)
diff = self._compare_dirs(tmpdir, tmpdir2)
self.assertEqual(diff, [])
# and again, this time with the format specified
tmpdir3 = self.mkdtemp()
unpack_archive(filename, tmpdir3, format=format)
diff = self._compare_dirs(tmpdir, tmpdir3)
self.assertEqual(diff, [])
self.assertRaises(shutil.ReadError, unpack_archive, TESTFN)
self.assertRaises(ValueError, unpack_archive, TESTFN, format='xxx')
    def test_unpack_registry(self):
formats = get_unpack_formats()
def _boo(filename, extract_dir, extra):
self.assertEqual(extra, 1)
self.assertEqual(filename, 'stuff.boo')
self.assertEqual(extract_dir, 'xx')
register_unpack_format('Boo', ['.boo', '.b2'], _boo, [('extra', 1)])
unpack_archive('stuff.boo', 'xx')
# trying to register a .boo unpacker again
self.assertRaises(RegistryError, register_unpack_format, 'Boo2',
['.boo'], _boo)
# should work now
unregister_unpack_format('Boo')
register_unpack_format('Boo2', ['.boo'], _boo)
self.assertIn(('Boo2', ['.boo'], ''), get_unpack_formats())
self.assertNotIn(('Boo', ['.boo'], ''), get_unpack_formats())
# let's leave a clean state
unregister_unpack_format('Boo2')
self.assertEqual(get_unpack_formats(), formats)
class TestMove(unittest.TestCase):
def setUp(self):
filename = "foo"
self.src_dir = tempfile.mkdtemp()
self.dst_dir = tempfile.mkdtemp()
self.src_file = os.path.join(self.src_dir, filename)
self.dst_file = os.path.join(self.dst_dir, filename)
with open(self.src_file, "wb") as f:
f.write(b"spam")
def tearDown(self):
for d in (self.src_dir, self.dst_dir):
try:
if d:
shutil.rmtree(d)
except:
pass
def _check_move_file(self, src, dst, real_dst):
with open(src, "rb") as f:
contents = f.read()
shutil.move(src, dst)
with open(real_dst, "rb") as f:
self.assertEqual(contents, f.read())
self.assertFalse(os.path.exists(src))
def _check_move_dir(self, src, dst, real_dst):
contents = sorted(os.listdir(src))
shutil.move(src, dst)
self.assertEqual(contents, sorted(os.listdir(real_dst)))
self.assertFalse(os.path.exists(src))
def test_move_file(self):
# Move a file to another location on the same filesystem.
self._check_move_file(self.src_file, self.dst_file, self.dst_file)
def test_move_file_to_dir(self):
# Move a file inside an existing dir on the same filesystem.
self._check_move_file(self.src_file, self.dst_dir, self.dst_file)
@mock_rename
def test_move_file_other_fs(self):
# Move a file to an existing dir on another filesystem.
self.test_move_file()
@mock_rename
def test_move_file_to_dir_other_fs(self):
# Move a file to another location on another filesystem.
self.test_move_file_to_dir()
def test_move_dir(self):
# Move a dir to another location on the same filesystem.
dst_dir = tempfile.mktemp()
try:
self._check_move_dir(self.src_dir, dst_dir, dst_dir)
finally:
try:
shutil.rmtree(dst_dir)
except:
pass
@mock_rename
def test_move_dir_other_fs(self):
# Move a dir to another location on another filesystem.
self.test_move_dir()
def test_move_dir_to_dir(self):
# Move a dir inside an existing dir on the same filesystem.
self._check_move_dir(self.src_dir, self.dst_dir,
os.path.join(self.dst_dir, os.path.basename(self.src_dir)))
@mock_rename
def test_move_dir_to_dir_other_fs(self):
# Move a dir inside an existing dir on another filesystem.
self.test_move_dir_to_dir()
def test_existing_file_inside_dest_dir(self):
# A file with the same name inside the destination dir already exists.
with open(self.dst_file, "wb"):
pass
self.assertRaises(shutil.Error, shutil.move, self.src_file, self.dst_dir)
def test_dont_move_dir_in_itself(self):
# Moving a dir inside itself raises an Error.
dst = os.path.join(self.src_dir, "bar")
self.assertRaises(shutil.Error, shutil.move, self.src_dir, dst)
def test_destinsrc_false_negative(self):
os.mkdir(TESTFN)
try:
for src, dst in [('srcdir', 'srcdir/dest')]:
src = os.path.join(TESTFN, src)
dst = os.path.join(TESTFN, dst)
self.assertTrue(shutil._destinsrc(src, dst),
msg='_destinsrc() wrongly concluded that '
'dst (%s) is not in src (%s)' % (dst, src))
finally:
shutil.rmtree(TESTFN, ignore_errors=True)
def test_destinsrc_false_positive(self):
os.mkdir(TESTFN)
try:
for src, dst in [('srcdir', 'src/dest'), ('srcdir', 'srcdir.new')]:
src = os.path.join(TESTFN, src)
dst = os.path.join(TESTFN, dst)
self.assertFalse(shutil._destinsrc(src, dst),
msg='_destinsrc() wrongly concluded that '
'dst (%s) is in src (%s)' % (dst, src))
finally:
shutil.rmtree(TESTFN, ignore_errors=True)
class TestCopyFile(unittest.TestCase):
_delete = False
class Faux(object):
_entered = False
_exited_with = None
_raised = False
def __init__(self, raise_in_exit=False, suppress_at_exit=True):
self._raise_in_exit = raise_in_exit
self._suppress_at_exit = suppress_at_exit
def read(self, *args):
return ''
def __enter__(self):
self._entered = True
def __exit__(self, exc_type, exc_val, exc_tb):
self._exited_with = exc_type, exc_val, exc_tb
if self._raise_in_exit:
self._raised = True
raise IOError("Cannot close")
return self._suppress_at_exit
def tearDown(self):
if self._delete:
del shutil.open
def _set_shutil_open(self, func):
shutil.open = func
self._delete = True
def test_w_source_open_fails(self):
def _open(filename, mode='r'):
if filename == 'srcfile':
raise IOError('Cannot open "srcfile"')
assert 0 # shouldn't reach here.
self._set_shutil_open(_open)
self.assertRaises(IOError, shutil.copyfile, 'srcfile', 'destfile')
def test_w_dest_open_fails(self):
srcfile = self.Faux()
def _open(filename, mode='r'):
if filename == 'srcfile':
return srcfile
if filename == 'destfile':
raise IOError('Cannot open "destfile"')
assert 0 # shouldn't reach here.
self._set_shutil_open(_open)
shutil.copyfile('srcfile', 'destfile')
self.assertTrue(srcfile._entered)
self.assertTrue(srcfile._exited_with[0] is IOError)
self.assertEqual(srcfile._exited_with[1].args,
('Cannot open "destfile"',))
def test_w_dest_close_fails(self):
srcfile = self.Faux()
destfile = self.Faux(True)
def _open(filename, mode='r'):
if filename == 'srcfile':
return srcfile
if filename == 'destfile':
return destfile
assert 0 # shouldn't reach here.
self._set_shutil_open(_open)
shutil.copyfile('srcfile', 'destfile')
self.assertTrue(srcfile._entered)
self.assertTrue(destfile._entered)
self.assertTrue(destfile._raised)
self.assertTrue(srcfile._exited_with[0] is IOError)
self.assertEqual(srcfile._exited_with[1].args,
('Cannot close',))
def test_w_source_close_fails(self):
srcfile = self.Faux(True)
destfile = self.Faux()
def _open(filename, mode='r'):
if filename == 'srcfile':
return srcfile
if filename == 'destfile':
return destfile
assert 0 # shouldn't reach here.
self._set_shutil_open(_open)
self.assertRaises(IOError,
shutil.copyfile, 'srcfile', 'destfile')
self.assertTrue(srcfile._entered)
self.assertTrue(destfile._entered)
self.assertFalse(destfile._raised)
self.assertTrue(srcfile._exited_with[0] is None)
self.assertTrue(srcfile._raised)
def test_move_dir_caseinsensitive(self):
# Renames a folder to the same name
# but a different case.
self.src_dir = tempfile.mkdtemp()
dst_dir = os.path.join(
os.path.dirname(self.src_dir),
os.path.basename(self.src_dir).upper())
self.assertNotEqual(self.src_dir, dst_dir)
try:
shutil.move(self.src_dir, dst_dir)
self.assertTrue(os.path.isdir(dst_dir))
finally:
if os.path.exists(dst_dir):
os.rmdir(dst_dir)
def test_main():
support.run_unittest(TestShutil, TestMove, TestCopyFile)
if __name__ == '__main__':
test_main()
| lgpl-3.0 |
fivejjs/pyhsmm-autoregressive | autoregressive/util.py | 1 | 1780 | from __future__ import division
import numpy as np
from numpy.lib.stride_tricks import as_strided as ast
### striding data for efficient AR computations
def AR_striding(data,nlags):
# I had some trouble with views and as_strided, so copy if not contiguous
data = np.asarray(data)
if not data.flags.c_contiguous:
data = data.copy(order='C')
if data.ndim == 1:
data = np.reshape(data,(-1,1))
sz = data.dtype.itemsize
return ast(
data,
shape=(data.shape[0]-nlags,data.shape[1]*(nlags+1)),
strides=(data.shape[1]*sz,sz))
def undo_AR_striding(strided_data,nlags):
sz = strided_data.dtype.itemsize
return ast(
strided_data,
        # floor-divide so the shape stays integral under "from __future__ import division"
        shape=(strided_data.shape[0]+nlags,strided_data.shape[1]//(nlags+1)),
        strides=(strided_data.shape[1]//(nlags+1)*sz,sz))
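# A minimal sketch (hypothetical shapes) of the round trip:
#
#     data = np.arange(10.).reshape(5, 2)      # T=5 observations, D=2
#     strided = AR_striding(data, nlags=1)     # shape (4, 4)
#     # row t of strided is [y_t, y_{t+1}] flattened, so regressors and
#     # targets sit side by side without copying the underlying buffer
#     assert (undo_AR_striding(strided, 1) == data).all()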
### analyzing AR coefficient matrices
def canonical_matrix(A):
# NOTE: throws away affine part
D, nlags, _ = dimensions(A)
mat = np.zeros((D*nlags,D*nlags))
mat[:-D,D:] = np.eye(D*(nlags-1))
mat[-D:,:] = A[:,:D*nlags]
return mat
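# The result is the standard companion-matrix form of the VAR(nlags)
# process: identity blocks shift the lagged states along while the last D
# rows apply the coefficients in A, e.g. for D=1, nlags=2 and A = [a1 a2]:
#
#     [ 0   1  ]
#     [ a1  a2 ]
#
# so is_stable() below just checks that its eigenvalues lie strictly
# inside the unit circle.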
def eval_siso_transfer_function(A,from_idx,to_idx,freqs):
D, _, _ = dimensions(A)
assert 0 <= from_idx < D and 0 <= to_idx < D
bigA = canonical_matrix(A)
I = np.eye(bigA.shape[0])
zs = np.exp(1j*np.array(freqs))
return np.array(
[np.linalg.inv(z*I-bigA)[-D:,-2*D:-D][to_idx,from_idx]
for z in zs])
def is_affine(A):
return bool(A.shape[1] % A.shape[0])
def is_stable(A):
bigA = canonical_matrix(A)
return np.all(np.abs(np.linalg.eigvals(bigA)) < 1.)
def dimensions(A):
if is_affine(A):
A = A[:,:-1]
D, nlags = A.shape[0], A.shape[1] // A.shape[0]
return D, nlags, is_affine(A)
| gpl-2.0 |
mapbased/phantomjs | src/qt/qtbase/util/local_database/cldr2qlocalexml.py | 102 | 42691 | #!/usr/bin/env python
#############################################################################
##
## Copyright (C) 2013 Digia Plc and/or its subsidiary(-ies).
## Contact: http://www.qt-project.org/legal
##
## This file is part of the test suite of the Qt Toolkit.
##
## $QT_BEGIN_LICENSE:LGPL$
## Commercial License Usage
## Licensees holding valid commercial Qt licenses may use this file in
## accordance with the commercial license agreement provided with the
## Software or, alternatively, in accordance with the terms contained in
## a written agreement between you and Digia. For licensing terms and
## conditions see http://qt.digia.com/licensing. For further information
## use the contact form at http://qt.digia.com/contact-us.
##
## GNU Lesser General Public License Usage
## Alternatively, this file may be used under the terms of the GNU Lesser
## General Public License version 2.1 as published by the Free Software
## Foundation and appearing in the file LICENSE.LGPL included in the
## packaging of this file. Please review the following information to
## ensure the GNU Lesser General Public License version 2.1 requirements
## will be met: http://www.gnu.org/licenses/old-licenses/lgpl-2.1.html.
##
## In addition, as a special exception, Digia gives you certain additional
## rights. These rights are described in the Digia Qt LGPL Exception
## version 1.1, included in the file LGPL_EXCEPTION.txt in this package.
##
## GNU General Public License Usage
## Alternatively, this file may be used under the terms of the GNU
## General Public License version 3.0 as published by the Free Software
## Foundation and appearing in the file LICENSE.GPL included in the
## packaging of this file. Please review the following information to
## ensure the GNU General Public License version 3.0 requirements will be
## met: http://www.gnu.org/copyleft/gpl.html.
##
##
## $QT_END_LICENSE$
##
#############################################################################
import os
import sys
import enumdata
import xpathlite
from xpathlite import DraftResolution
from dateconverter import convert_date
from xml.sax.saxutils import escape, unescape
import re
findAlias = xpathlite.findAlias
findEntry = xpathlite.findEntry
findEntryInFile = xpathlite._findEntryInFile
findTagsInFile = xpathlite.findTagsInFile
def parse_number_format(patterns, data):
# this is a very limited parsing of the number format for currency only.
def skip_repeating_pattern(x):
p = x.replace('0', '#').replace(',', '').replace('.', '')
seen = False
result = ''
for c in p:
if c == '#':
if seen:
continue
seen = True
else:
seen = False
result = result + c
return result
patterns = patterns.split(';')
result = []
for pattern in patterns:
pattern = skip_repeating_pattern(pattern)
pattern = pattern.replace('#', "%1")
# according to http://www.unicode.org/reports/tr35/#Number_Format_Patterns
# there can be doubled or trippled currency sign, however none of the
# locales use that.
pattern = pattern.replace(u'\xa4', "%2")
pattern = pattern.replace("''", "###").replace("'", '').replace("###", "'")
pattern = pattern.replace('-', data['minus'])
pattern = pattern.replace('+', data['plus'])
result.append(pattern)
return result
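# A sketch of the transformation (hypothetical input): the CLDR pattern
# u'\xa4#,##0.00;(\xa4#,##0.00)' (currency sign U+00A4) yields
# ['%2%1', '(%2%1)'] -- %1 stands for the formatted number and %2 for the
# currency symbol, with the negative variant second.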
def parse_list_pattern_part_format(pattern):
# this is a very limited parsing of the format for list pattern part only.
result = ""
result = pattern.replace("{0}", "%1")
result = result.replace("{1}", "%2")
result = result.replace("{2}", "%3")
return result
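# For example (hypothetical input): the CLDR part "{0}, {1}" becomes
# "%1, %2", matching the QLocale placeholder convention used above.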
def ordStr(c):
if len(c) == 1:
return str(ord(c))
raise xpathlite.Error("Unable to handle value \"%s\"" % addEscapes(c))
return "##########"
# the following functions are supposed to fix the problem with QLocale
# returning a character instead of strings for QLocale::exponential()
# and others. So we fallback to default values in these cases.
def fixOrdStrMinus(c):
if len(c) == 1:
return str(ord(c))
return str(ord('-'))
def fixOrdStrPlus(c):
if len(c) == 1:
return str(ord(c))
return str(ord('+'))
def fixOrdStrExp(c):
if len(c) == 1:
return str(ord(c))
return str(ord('e'))
def fixOrdStrPercent(c):
if len(c) == 1:
return str(ord(c))
return str(ord('%'))
def fixOrdStrList(c):
if len(c) == 1:
return str(ord(c))
return str(ord(';'))
def generateLocaleInfo(path):
(dir_name, file_name) = os.path.split(path)
if not path.endswith(".xml"):
return {}
# skip legacy/compatibility ones
alias = findAlias(path)
if alias:
raise xpathlite.Error("alias to \"%s\"" % alias)
language_code = findEntryInFile(path, "identity/language", attribute="type")[0]
if language_code == 'root':
# just skip it
return {}
country_code = findEntryInFile(path, "identity/territory", attribute="type")[0]
script_code = findEntryInFile(path, "identity/script", attribute="type")[0]
variant_code = findEntryInFile(path, "identity/variant", attribute="type")[0]
# we do not support variants
# ### actually there is only one locale with variant: en_US_POSIX
# does anybody care about it at all?
if variant_code:
raise xpathlite.Error("we do not support variants (\"%s\")" % variant_code)
language_id = enumdata.languageCodeToId(language_code)
if language_id <= 0:
raise xpathlite.Error("unknown language code \"%s\"" % language_code)
language = enumdata.language_list[language_id][0]
script_id = enumdata.scriptCodeToId(script_code)
if script_id == -1:
raise xpathlite.Error("unknown script code \"%s\"" % script_code)
script = enumdata.script_list[script_id][0]
# we should handle fully qualified names with the territory
if not country_code:
return {}
country_id = enumdata.countryCodeToId(country_code)
if country_id <= 0:
raise xpathlite.Error("unknown country code \"%s\"" % country_code)
country = enumdata.country_list[country_id][0]
# So we say we accept only those values that have "contributed" or
# "approved" resolution. see http://www.unicode.org/cldr/process.html
# But we only respect the resolution for new datas for backward
# compatibility.
draft = DraftResolution.contributed
result = {}
result['language'] = language
result['script'] = script
result['country'] = country
result['language_code'] = language_code
result['country_code'] = country_code
result['script_code'] = script_code
result['variant_code'] = variant_code
result['language_id'] = language_id
result['script_id'] = script_id
result['country_id'] = country_id
supplementalPath = dir_name + "/../supplemental/supplementalData.xml"
currencies = findTagsInFile(supplementalPath, "currencyData/region[iso3166=%s]"%country_code);
result['currencyIsoCode'] = ''
result['currencyDigits'] = 2
result['currencyRounding'] = 1
if currencies:
for e in currencies:
if e[0] == 'currency':
tender = True
t = filter(lambda x: x[0] == 'tender', e[1])
if t and t[0][1] == 'false':
tender = False;
if tender and not filter(lambda x: x[0] == 'to', e[1]):
result['currencyIsoCode'] = filter(lambda x: x[0] == 'iso4217', e[1])[0][1]
break
if result['currencyIsoCode']:
t = findTagsInFile(supplementalPath, "currencyData/fractions/info[iso4217=%s]"%result['currencyIsoCode']);
if t and t[0][0] == 'info':
result['currencyDigits'] = int(filter(lambda x: x[0] == 'digits', t[0][1])[0][1])
result['currencyRounding'] = int(filter(lambda x: x[0] == 'rounding', t[0][1])[0][1])
numbering_system = None
try:
numbering_system = findEntry(path, "numbers/defaultNumberingSystem")
except:
pass
def findEntryDef(path, xpath, value=''):
try:
return findEntry(path, xpath)
except xpathlite.Error:
return value
def get_number_in_system(path, xpath, numbering_system):
if numbering_system:
try:
return findEntry(path, xpath + "[numberSystem=" + numbering_system + "]")
except xpathlite.Error:
# in CLDR 1.9 number system was refactored for numbers (but not for currency)
# so if previous findEntry doesn't work we should try this:
try:
return findEntry(path, xpath.replace("/symbols/", "/symbols[numberSystem=" + numbering_system + "]/"))
except xpathlite.Error:
# fallback to default
pass
return findEntry(path, xpath)
result['decimal'] = get_number_in_system(path, "numbers/symbols/decimal", numbering_system)
result['group'] = get_number_in_system(path, "numbers/symbols/group", numbering_system)
result['list'] = get_number_in_system(path, "numbers/symbols/list", numbering_system)
result['percent'] = get_number_in_system(path, "numbers/symbols/percentSign", numbering_system)
try:
numbering_systems = {}
for ns in findTagsInFile(cldr_dir + "/../supplemental/numberingSystems.xml", "numberingSystems"):
tmp = {}
id = ""
for data in ns[1:][0]: # ns looks like this: [u'numberingSystem', [(u'digits', u'0123456789'), (u'type', u'numeric'), (u'id', u'latn')]]
tmp[data[0]] = data[1]
if data[0] == u"id":
id = data[1]
numbering_systems[id] = tmp
result['zero'] = numbering_systems[numbering_system][u"digits"][0]
    except Exception as e:
sys.stderr.write("Native zero detection problem:\n" + str(e) + "\n")
result['zero'] = get_number_in_system(path, "numbers/symbols/nativeZeroDigit", numbering_system)
result['minus'] = get_number_in_system(path, "numbers/symbols/minusSign", numbering_system)
result['plus'] = get_number_in_system(path, "numbers/symbols/plusSign", numbering_system)
result['exp'] = get_number_in_system(path, "numbers/symbols/exponential", numbering_system).lower()
result['quotationStart'] = findEntry(path, "delimiters/quotationStart")
result['quotationEnd'] = findEntry(path, "delimiters/quotationEnd")
result['alternateQuotationStart'] = findEntry(path, "delimiters/alternateQuotationStart")
result['alternateQuotationEnd'] = findEntry(path, "delimiters/alternateQuotationEnd")
result['listPatternPartStart'] = parse_list_pattern_part_format(findEntry(path, "listPatterns/listPattern/listPatternPart[start]"))
result['listPatternPartMiddle'] = parse_list_pattern_part_format(findEntry(path, "listPatterns/listPattern/listPatternPart[middle]"))
result['listPatternPartEnd'] = parse_list_pattern_part_format(findEntry(path, "listPatterns/listPattern/listPatternPart[end]"))
result['listPatternPartTwo'] = parse_list_pattern_part_format(findEntry(path, "listPatterns/listPattern/listPatternPart[2]"))
result['am'] = findEntry(path, "dates/calendars/calendar[gregorian]/dayPeriods/dayPeriodContext[format]/dayPeriodWidth[wide]/dayPeriod[am]", draft)
result['pm'] = findEntry(path, "dates/calendars/calendar[gregorian]/dayPeriods/dayPeriodContext[format]/dayPeriodWidth[wide]/dayPeriod[pm]", draft)
result['longDateFormat'] = convert_date(findEntry(path, "dates/calendars/calendar[gregorian]/dateFormats/dateFormatLength[full]/dateFormat/pattern"))
result['shortDateFormat'] = convert_date(findEntry(path, "dates/calendars/calendar[gregorian]/dateFormats/dateFormatLength[short]/dateFormat/pattern"))
result['longTimeFormat'] = convert_date(findEntry(path, "dates/calendars/calendar[gregorian]/timeFormats/timeFormatLength[full]/timeFormat/pattern"))
result['shortTimeFormat'] = convert_date(findEntry(path, "dates/calendars/calendar[gregorian]/timeFormats/timeFormatLength[short]/timeFormat/pattern"))
endonym = None
if country_code and script_code:
endonym = findEntryDef(path, "localeDisplayNames/languages/language[type=%s_%s_%s]" % (language_code, script_code, country_code))
if not endonym and script_code:
endonym = findEntryDef(path, "localeDisplayNames/languages/language[type=%s_%s]" % (language_code, script_code))
if not endonym and country_code:
endonym = findEntryDef(path, "localeDisplayNames/languages/language[type=%s_%s]" % (language_code, country_code))
if not endonym:
endonym = findEntryDef(path, "localeDisplayNames/languages/language[type=%s]" % (language_code))
result['language_endonym'] = endonym
result['country_endonym'] = findEntryDef(path, "localeDisplayNames/territories/territory[type=%s]" % (country_code))
currency_format = get_number_in_system(path, "numbers/currencyFormats/currencyFormatLength/currencyFormat/pattern", numbering_system)
currency_format = parse_number_format(currency_format, result)
result['currencyFormat'] = currency_format[0]
result['currencyNegativeFormat'] = ''
if len(currency_format) > 1:
result['currencyNegativeFormat'] = currency_format[1]
result['currencySymbol'] = ''
result['currencyDisplayName'] = ''
if result['currencyIsoCode']:
result['currencySymbol'] = findEntryDef(path, "numbers/currencies/currency[%s]/symbol" % result['currencyIsoCode'])
display_name_path = "numbers/currencies/currency[%s]/displayName" % result['currencyIsoCode']
result['currencyDisplayName'] \
= findEntryDef(path, display_name_path) + ";" \
+ findEntryDef(path, display_name_path + "[count=zero]") + ";" \
+ findEntryDef(path, display_name_path + "[count=one]") + ";" \
+ findEntryDef(path, display_name_path + "[count=two]") + ";" \
+ findEntryDef(path, display_name_path + "[count=few]") + ";" \
+ findEntryDef(path, display_name_path + "[count=many]") + ";" \
+ findEntryDef(path, display_name_path + "[count=other]") + ";"
standalone_long_month_path = "dates/calendars/calendar[gregorian]/months/monthContext[stand-alone]/monthWidth[wide]/month"
result['standaloneLongMonths'] \
= findEntry(path, standalone_long_month_path + "[1]") + ";" \
+ findEntry(path, standalone_long_month_path + "[2]") + ";" \
+ findEntry(path, standalone_long_month_path + "[3]") + ";" \
+ findEntry(path, standalone_long_month_path + "[4]") + ";" \
+ findEntry(path, standalone_long_month_path + "[5]") + ";" \
+ findEntry(path, standalone_long_month_path + "[6]") + ";" \
+ findEntry(path, standalone_long_month_path + "[7]") + ";" \
+ findEntry(path, standalone_long_month_path + "[8]") + ";" \
+ findEntry(path, standalone_long_month_path + "[9]") + ";" \
+ findEntry(path, standalone_long_month_path + "[10]") + ";" \
+ findEntry(path, standalone_long_month_path + "[11]") + ";" \
+ findEntry(path, standalone_long_month_path + "[12]") + ";"
month_path_base = "dates/calendars/calendar[gregorian]/months/monthContext[%s]/monthWidth[%s]/month"
def _monthList(context, width):
    # Concatenate the twelve month names for the given context/width,
    # separated (and terminated) by semicolons, exactly as the explicit
    # twelve-entry findEntry chains did.
    month_path = month_path_base % (context, width)
    return ''.join(findEntry(path, month_path + "[%d]" % i) + ";"
                   for i in range(1, 13))
result['standaloneShortMonths'] = _monthList("stand-alone", "abbreviated")
result['standaloneNarrowMonths'] = _monthList("stand-alone", "narrow")
result['longMonths'] = _monthList("format", "wide")
result['shortMonths'] = _monthList("format", "abbreviated")
result['narrowMonths'] = _monthList("format", "narrow")
long_day_path = "dates/calendars/calendar[gregorian]/days/dayContext[format]/dayWidth[wide]/day"
result['longDays'] \
= findEntry(path, long_day_path + "[sun]") + ";" \
+ findEntry(path, long_day_path + "[mon]") + ";" \
+ findEntry(path, long_day_path + "[tue]") + ";" \
+ findEntry(path, long_day_path + "[wed]") + ";" \
+ findEntry(path, long_day_path + "[thu]") + ";" \
+ findEntry(path, long_day_path + "[fri]") + ";" \
+ findEntry(path, long_day_path + "[sat]") + ";"
short_day_path = "dates/calendars/calendar[gregorian]/days/dayContext[format]/dayWidth[abbreviated]/day"
result['shortDays'] \
= findEntry(path, short_day_path + "[sun]") + ";" \
+ findEntry(path, short_day_path + "[mon]") + ";" \
+ findEntry(path, short_day_path + "[tue]") + ";" \
+ findEntry(path, short_day_path + "[wed]") + ";" \
+ findEntry(path, short_day_path + "[thu]") + ";" \
+ findEntry(path, short_day_path + "[fri]") + ";" \
+ findEntry(path, short_day_path + "[sat]") + ";"
narrow_day_path = "dates/calendars/calendar[gregorian]/days/dayContext[format]/dayWidth[narrow]/day"
result['narrowDays'] \
= findEntry(path, narrow_day_path + "[sun]") + ";" \
+ findEntry(path, narrow_day_path + "[mon]") + ";" \
+ findEntry(path, narrow_day_path + "[tue]") + ";" \
+ findEntry(path, narrow_day_path + "[wed]") + ";" \
+ findEntry(path, narrow_day_path + "[thu]") + ";" \
+ findEntry(path, narrow_day_path + "[fri]") + ";" \
+ findEntry(path, narrow_day_path + "[sat]") + ";"
standalone_long_day_path = "dates/calendars/calendar[gregorian]/days/dayContext[stand-alone]/dayWidth[wide]/day"
result['standaloneLongDays'] \
= findEntry(path, standalone_long_day_path + "[sun]") + ";" \
+ findEntry(path, standalone_long_day_path + "[mon]") + ";" \
+ findEntry(path, standalone_long_day_path + "[tue]") + ";" \
+ findEntry(path, standalone_long_day_path + "[wed]") + ";" \
+ findEntry(path, standalone_long_day_path + "[thu]") + ";" \
+ findEntry(path, standalone_long_day_path + "[fri]") + ";" \
+ findEntry(path, standalone_long_day_path + "[sat]") + ";"
standalone_short_day_path = "dates/calendars/calendar[gregorian]/days/dayContext[stand-alone]/dayWidth[abbreviated]/day"
result['standaloneShortDays'] \
= findEntry(path, standalone_short_day_path + "[sun]") + ";" \
+ findEntry(path, standalone_short_day_path + "[mon]") + ";" \
+ findEntry(path, standalone_short_day_path + "[tue]") + ";" \
+ findEntry(path, standalone_short_day_path + "[wed]") + ";" \
+ findEntry(path, standalone_short_day_path + "[thu]") + ";" \
+ findEntry(path, standalone_short_day_path + "[fri]") + ";" \
+ findEntry(path, standalone_short_day_path + "[sat]") + ";"
standalone_narrow_day_path = "dates/calendars/calendar[gregorian]/days/dayContext[stand-alone]/dayWidth[narrow]/day"
result['standaloneNarrowDays'] \
= findEntry(path, standalone_narrow_day_path + "[sun]") + ";" \
+ findEntry(path, standalone_narrow_day_path + "[mon]") + ";" \
+ findEntry(path, standalone_narrow_day_path + "[tue]") + ";" \
+ findEntry(path, standalone_narrow_day_path + "[wed]") + ";" \
+ findEntry(path, standalone_narrow_day_path + "[thu]") + ";" \
+ findEntry(path, standalone_narrow_day_path + "[fri]") + ";" \
+ findEntry(path, standalone_narrow_day_path + "[sat]") + ";"
return result
def addEscapes(s):
result = ''
for c in s:
n = ord(c)
if n < 128:
result += c
else:
result += "\\x"
result += "%02x" % (n)
return result
def unicodeStr(s):
utf8 = s.encode('utf-8')
return "<size>" + str(len(utf8)) + "</size><data>" + addEscapes(utf8) + "</data>"
def usage():
print "Usage: cldr2qlocalexml.py <path-to-cldr-main>"
sys.exit()
def integrateWeekData(filePath):
    if not filePath.endswith(".xml"):
        return {}
    days = ("mon", "tue", "wed", "thu", "fri", "sat", "sun")
    def dayByCountryCode(element):
        # Map each territory code to the day named by the given weekData
        # element ("firstDay", "weekendStart" or "weekendEnd").
        byCountryCode = {}
        for day in days:
            query = "weekData/%s[day=%s]" % (element, day)
            territories = findEntryInFile(filePath, query, attribute="territories")[0]
            for countryCode in territories.split(" "):
                byCountryCode[countryCode] = day
        return byCountryCode
    firstDayByCountryCode = dayByCountryCode("firstDay")
    weekendStartByCountryCode = dayByCountryCode("weekendStart")
    weekendEndByCountryCode = dayByCountryCode("weekendEnd")
    for (key, locale) in locale_database.iteritems():
        countryCode = locale['country_code']
        # "001" is CLDR's world territory, used as the default.
        locale_database[key]['firstDayOfWeek'] = firstDayByCountryCode.get(
            countryCode, firstDayByCountryCode["001"])
        locale_database[key]['weekendStart'] = weekendStartByCountryCode.get(
            countryCode, weekendStartByCountryCode["001"])
        locale_database[key]['weekendEnd'] = weekendEndByCountryCode.get(
            countryCode, weekendEndByCountryCode["001"])
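    # Illustratively, a locale whose country_code is not listed in the CLDR
    # supplemental week data falls back to the "001" (world) defaults, which
    # are typically firstDayOfWeek "mon" and a "sat".."sun" weekend.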
if len(sys.argv) != 2:
usage()
cldr_dir = sys.argv[1]
if not os.path.isdir(cldr_dir):
usage()
cldr_files = os.listdir(cldr_dir)
locale_database = {}
for file in cldr_files:
try:
l = generateLocaleInfo(cldr_dir + "/" + file)
if not l:
sys.stderr.write("skipping file \"" + file + "\"\n")
continue
except xpathlite.Error as e:
sys.stderr.write("skipping file \"%s\" (%s)\n" % (file, str(e)))
continue
locale_database[(l['language_id'], l['script_id'], l['country_id'], l['variant_code'])] = l
integrateWeekData(cldr_dir+"/../supplemental/supplementalData.xml")
locale_keys = locale_database.keys()
locale_keys.sort()
cldr_version = 'unknown'
ldml = open(cldr_dir+"/../dtd/ldml.dtd", "r")
for line in ldml:
if 'version cldrVersion CDATA #FIXED' in line:
cldr_version = line.split('"')[1]
print "<localeDatabase>"
print " <version>" + cldr_version + "</version>"
print " <languageList>"
for id in enumdata.language_list:
l = enumdata.language_list[id]
print " <language>"
print " <name>" + l[0] + "</name>"
print " <id>" + str(id) + "</id>"
print " <code>" + l[1] + "</code>"
print " </language>"
print " </languageList>"
print " <scriptList>"
for id in enumdata.script_list:
l = enumdata.script_list[id]
print " <script>"
print " <name>" + l[0] + "</name>"
print " <id>" + str(id) + "</id>"
print " <code>" + l[1] + "</code>"
print " </script>"
print " </scriptList>"
print " <countryList>"
for id in enumdata.country_list:
l = enumdata.country_list[id]
print " <country>"
print " <name>" + l[0] + "</name>"
print " <id>" + str(id) + "</id>"
print " <code>" + l[1] + "</code>"
print " </country>"
print " </countryList>"
def _parseLocale(l):
language = "AnyLanguage"
script = "AnyScript"
country = "AnyCountry"
if l == "und":
raise xpathlite.Error("we are treating unknown locale like C")
items = l.split("_")
language_code = items[0]
if language_code != "und":
language_id = enumdata.languageCodeToId(language_code)
if language_id == -1:
raise xpathlite.Error("unknown language code \"%s\"" % language_code)
language = enumdata.language_list[language_id][0]
if len(items) > 1:
script_code = items[1]
country_code = ""
if len(items) > 2:
country_code = items[2]
if len(script_code) == 4:
script_id = enumdata.scriptCodeToId(script_code)
if script_id == -1:
raise xpathlite.Error("unknown script code \"%s\"" % script_code)
script = enumdata.script_list[script_id][0]
else:
country_code = script_code
if country_code:
country_id = enumdata.countryCodeToId(country_code)
if country_id == -1:
raise xpathlite.Error("unknown country code \"%s\"" % country_code)
country = enumdata.country_list[country_id][0]
return (language, script, country)
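# Illustration (assuming the usual enumdata tables): _parseLocale("zh_Hant_TW")
# returns the display names for language, script and country (roughly
# ("Chinese", "Traditional Han", "Taiwan")), while _parseLocale("und") raises,
# since the unknown locale is treated like C.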
print " <likelySubtags>"
for ns in findTagsInFile(cldr_dir + "/../supplemental/likelySubtags.xml", "likelySubtags"):
tmp = {}
for data in ns[1:][0]: # ns looks like this: [u'likelySubtag', [(u'from', u'aa'), (u'to', u'aa_Latn_ET')]]
tmp[data[0]] = data[1]
try:
(from_language, from_script, from_country) = _parseLocale(tmp[u"from"])
except xpathlite.Error as e:
sys.stderr.write("skipping likelySubtag \"%s\" -> \"%s\" (%s)\n" % (tmp[u"from"], tmp[u"to"], str(e)))
continue
try:
(to_language, to_script, to_country) = _parseLocale(tmp[u"to"])
except xpathlite.Error as e:
sys.stderr.write("skipping likelySubtag \"%s\" -> \"%s\" (%s)\n" % (tmp[u"from"], tmp[u"to"], str(e)))
continue
# substitute according to http://www.unicode.org/reports/tr35/#Likely_Subtags
if to_country == "AnyCountry" and from_country != to_country:
to_country = from_country
if to_script == "AnyScript" and from_script != to_script:
to_script = from_script
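    # e.g. (illustrative): for a mapping like "sr" -> "sr_Cyrl_RS" nothing is
    # substituted, but if the "to" side lacked a country or script, it would
    # inherit the one given on the "from" side.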
print " <likelySubtag>"
print " <from>"
print " <language>" + from_language + "</language>"
print " <script>" + from_script + "</script>"
print " <country>" + from_country + "</country>"
print " </from>"
print " <to>"
print " <language>" + to_language + "</language>"
print " <script>" + to_script + "</script>"
print " <country>" + to_country + "</country>"
print " </to>"
print " </likelySubtag>"
print " </likelySubtags>"
print " <localeList>"
print \
" <locale>\n\
<language>C</language>\n\
<languageEndonym></languageEndonym>\n\
<script>AnyScript</script>\n\
<country>AnyCountry</country>\n\
<countryEndonym></countryEndonym>\n\
<decimal>46</decimal>\n\
<group>44</group>\n\
<list>59</list>\n\
<percent>37</percent>\n\
<zero>48</zero>\n\
<minus>45</minus>\n\
<plus>43</plus>\n\
<exp>101</exp>\n\
<quotationStart>\"</quotationStart>\n\
<quotationEnd>\"</quotationEnd>\n\
<alternateQuotationStart>\'</alternateQuotationStart>\n\
<alternateQuotationEnd>\'</alternateQuotationEnd>\n\
<listPatternPartStart>%1, %2</listPatternPartStart>\n\
<listPatternPartMiddle>%1, %2</listPatternPartMiddle>\n\
<listPatternPartEnd>%1, %2</listPatternPartEnd>\n\
<listPatternPartTwo>%1, %2</listPatternPartTwo>\n\
<am>AM</am>\n\
<pm>PM</pm>\n\
<firstDayOfWeek>mon</firstDayOfWeek>\n\
<weekendStart>sat</weekendStart>\n\
<weekendEnd>sun</weekendEnd>\n\
<longDateFormat>EEEE, d MMMM yyyy</longDateFormat>\n\
<shortDateFormat>d MMM yyyy</shortDateFormat>\n\
<longTimeFormat>HH:mm:ss z</longTimeFormat>\n\
<shortTimeFormat>HH:mm:ss</shortTimeFormat>\n\
<standaloneLongMonths>January;February;March;April;May;June;July;August;September;October;November;December;</standaloneLongMonths>\n\
<standaloneShortMonths>Jan;Feb;Mar;Apr;May;Jun;Jul;Aug;Sep;Oct;Nov;Dec;</standaloneShortMonths>\n\
<standaloneNarrowMonths>J;F;M;A;M;J;J;A;S;O;N;D;</standaloneNarrowMonths>\n\
<longMonths>January;February;March;April;May;June;July;August;September;October;November;December;</longMonths>\n\
<shortMonths>Jan;Feb;Mar;Apr;May;Jun;Jul;Aug;Sep;Oct;Nov;Dec;</shortMonths>\n\
<narrowMonths>1;2;3;4;5;6;7;8;9;10;11;12;</narrowMonths>\n\
<longDays>Sunday;Monday;Tuesday;Wednesday;Thursday;Friday;Saturday;</longDays>\n\
<shortDays>Sun;Mon;Tue;Wed;Thu;Fri;Sat;</shortDays>\n\
<narrowDays>7;1;2;3;4;5;6;</narrowDays>\n\
<standaloneLongDays>Sunday;Monday;Tuesday;Wednesday;Thursday;Friday;Saturday;</standaloneLongDays>\n\
<standaloneShortDays>Sun;Mon;Tue;Wed;Thu;Fri;Sat;</standaloneShortDays>\n\
<standaloneNarrowDays>S;M;T;W;T;F;S;</standaloneNarrowDays>\n\
<currencyIsoCode></currencyIsoCode>\n\
<currencySymbol></currencySymbol>\n\
<currencyDisplayName>;;;;;;;</currencyDisplayName>\n\
<currencyDigits>2</currencyDigits>\n\
<currencyRounding>1</currencyRounding>\n\
<currencyFormat>%1%2</currencyFormat>\n\
<currencyNegativeFormat></currencyNegativeFormat>\n\
</locale>"
for key in locale_keys:
l = locale_database[key]
print " <locale>"
print " <language>" + l['language'] + "</language>"
print " <languageEndonym>" + escape(l['language_endonym']).encode('utf-8') + "</languageEndonym>"
print " <script>" + l['script'] + "</script>"
print " <country>" + l['country'] + "</country>"
print " <countryEndonym>" + escape(l['country_endonym']).encode('utf-8') + "</countryEndonym>"
print " <languagecode>" + l['language_code'] + "</languagecode>"
print " <scriptcode>" + l['script_code'] + "</scriptcode>"
print " <countrycode>" + l['country_code'] + "</countrycode>"
print " <decimal>" + ordStr(l['decimal']) + "</decimal>"
print " <group>" + ordStr(l['group']) + "</group>"
print " <list>" + fixOrdStrList(l['list']) + "</list>"
print " <percent>" + fixOrdStrPercent(l['percent']) + "</percent>"
print " <zero>" + ordStr(l['zero']) + "</zero>"
print " <minus>" + fixOrdStrMinus(l['minus']) + "</minus>"
print " <plus>" + fixOrdStrPlus(l['plus']) + "</plus>"
print " <exp>" + fixOrdStrExp(l['exp']) + "</exp>"
print " <quotationStart>" + l['quotationStart'].encode('utf-8') + "</quotationStart>"
print " <quotationEnd>" + l['quotationEnd'].encode('utf-8') + "</quotationEnd>"
print " <alternateQuotationStart>" + l['alternateQuotationStart'].encode('utf-8') + "</alternateQuotationStart>"
print " <alternateQuotationEnd>" + l['alternateQuotationEnd'].encode('utf-8') + "</alternateQuotationEnd>"
print " <listPatternPartStart>" + l['listPatternPartStart'].encode('utf-8') + "</listPatternPartStart>"
print " <listPatternPartMiddle>" + l['listPatternPartMiddle'].encode('utf-8') + "</listPatternPartMiddle>"
print " <listPatternPartEnd>" + l['listPatternPartEnd'].encode('utf-8') + "</listPatternPartEnd>"
print " <listPatternPartTwo>" + l['listPatternPartTwo'].encode('utf-8') + "</listPatternPartTwo>"
print " <am>" + l['am'].encode('utf-8') + "</am>"
print " <pm>" + l['pm'].encode('utf-8') + "</pm>"
print " <firstDayOfWeek>" + l['firstDayOfWeek'].encode('utf-8') + "</firstDayOfWeek>"
print " <weekendStart>" + l['weekendStart'].encode('utf-8') + "</weekendStart>"
print " <weekendEnd>" + l['weekendEnd'].encode('utf-8') + "</weekendEnd>"
print " <longDateFormat>" + l['longDateFormat'].encode('utf-8') + "</longDateFormat>"
print " <shortDateFormat>" + l['shortDateFormat'].encode('utf-8') + "</shortDateFormat>"
print " <longTimeFormat>" + l['longTimeFormat'].encode('utf-8') + "</longTimeFormat>"
print " <shortTimeFormat>" + l['shortTimeFormat'].encode('utf-8') + "</shortTimeFormat>"
print " <standaloneLongMonths>" + l['standaloneLongMonths'].encode('utf-8') + "</standaloneLongMonths>"
print " <standaloneShortMonths>"+ l['standaloneShortMonths'].encode('utf-8') + "</standaloneShortMonths>"
print " <standaloneNarrowMonths>"+ l['standaloneNarrowMonths'].encode('utf-8') + "</standaloneNarrowMonths>"
print " <longMonths>" + l['longMonths'].encode('utf-8') + "</longMonths>"
print " <shortMonths>" + l['shortMonths'].encode('utf-8') + "</shortMonths>"
print " <narrowMonths>" + l['narrowMonths'].encode('utf-8') + "</narrowMonths>"
print " <longDays>" + l['longDays'].encode('utf-8') + "</longDays>"
print " <shortDays>" + l['shortDays'].encode('utf-8') + "</shortDays>"
print " <narrowDays>" + l['narrowDays'].encode('utf-8') + "</narrowDays>"
print " <standaloneLongDays>" + l['standaloneLongDays'].encode('utf-8') + "</standaloneLongDays>"
print " <standaloneShortDays>" + l['standaloneShortDays'].encode('utf-8') + "</standaloneShortDays>"
print " <standaloneNarrowDays>" + l['standaloneNarrowDays'].encode('utf-8') + "</standaloneNarrowDays>"
print " <currencyIsoCode>" + l['currencyIsoCode'].encode('utf-8') + "</currencyIsoCode>"
print " <currencySymbol>" + l['currencySymbol'].encode('utf-8') + "</currencySymbol>"
print " <currencyDisplayName>" + l['currencyDisplayName'].encode('utf-8') + "</currencyDisplayName>"
print " <currencyDigits>" + str(l['currencyDigits']) + "</currencyDigits>"
print " <currencyRounding>" + str(l['currencyRounding']) + "</currencyRounding>"
print " <currencyFormat>" + l['currencyFormat'].encode('utf-8') + "</currencyFormat>"
print " <currencyNegativeFormat>" + l['currencyNegativeFormat'].encode('utf-8') + "</currencyNegativeFormat>"
print " </locale>"
print " </localeList>"
print "</localeDatabase>"
| bsd-3-clause |
hoangt/tpzsimul.gem5 | src/python/m5/util/sorteddict.py | 84 | 6183 | # Copyright (c) 2006-2009 Nathan Binkert <[email protected]>
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from bisect import bisect_left, bisect_right
class SortedDict(dict):
def _get_sorted(self):
return getattr(self, '_sorted', sorted)
def _set_sorted(self, val):
self._sorted = val
self._del_keys()
sorted = property(_get_sorted, _set_sorted)
@property
def _keys(self):
try:
return self._sorted_keys
except AttributeError:
_sorted_keys = self.sorted(dict.iterkeys(self))
self._sorted_keys = _sorted_keys
return _sorted_keys
    def _left_eq(self, key):
        index = self._left_ge(key)
        if self._keys[index] != key:
            raise KeyError(key)
        return index
    def _right_eq(self, key):
        index = self._right_le(key)
        if self._keys[index] != key:
            raise KeyError(key)
        return index
def _right_lt(self, key):
index = bisect_left(self._keys, key)
if index:
return index - 1
raise KeyError(key)
def _right_le(self, key):
index = bisect_right(self._keys, key)
if index:
return index - 1
raise KeyError(key)
def _left_gt(self, key):
index = bisect_right(self._keys, key)
if index != len(self._keys):
return index
raise KeyError(key)
def _left_ge(self, key):
index = bisect_left(self._keys, key)
if index != len(self._keys):
return index
raise KeyError(key)
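    # Illustration (assuming keys ['a', 'c', 'e']): _right_lt('d') and
    # _right_le('c') both return the index of 'c', while _left_gt('c') and
    # _left_ge('d') both return the index of 'e'; each helper raises KeyError
    # when no qualifying key exists, rather than returning a sentinel.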
def _del_keys(self):
try:
del self._sorted_keys
except AttributeError:
pass
def __repr__(self):
return 'SortedDict({%s})' % ', '.join('%r: %r' % item
for item in self.iteritems())
def __setitem__(self, key, item):
dict.__setitem__(self, key, item)
self._del_keys()
def __delitem__(self, key):
dict.__delitem__(self, key)
self._del_keys()
    def clear(self):
        dict.clear(self)
        self._del_keys()
def copy(self):
t = type(self)
return t(self)
def keys(self):
return self._keys[:]
def values(self):
return list(self.itervalues())
def items(self):
return list(self.iteritems())
def iterkeys(self):
return iter(self._keys)
def itervalues(self):
for k in self._keys:
yield self[k]
def iteritems(self):
for k in self._keys:
yield k, self[k]
    def keyrange(self, start=None, end=None, inclusive=False):
        if start is not None:
            start = self._left_ge(start)
        if end is not None:
            if inclusive:
                end = self._right_le(end)
            else:
                end = self._right_lt(end)
            end += 1
        return iter(self._keys[start:end])
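    # For example (illustrative), with keys ['a', 'c', 'e']: keyrange('b', 'e')
    # yields only 'c', while keyrange('b', 'e', inclusive=True) yields 'c'
    # and 'e'.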
def valuerange(self, *args, **kwargs):
for k in self.keyrange(*args, **kwargs):
yield self[k]
def itemrange(self, *args, **kwargs):
for k in self.keyrange(*args, **kwargs):
yield k, self[k]
def update(self, *args, **kwargs):
dict.update(self, *args, **kwargs)
self._del_keys()
    def setdefault(self, key, _failobj=None):
        try:
            return self[key]
        except KeyError:
            self[key] = _failobj
            return _failobj
    def pop(self, key, *args):
        try:
            value = dict.pop(self, key)
            self._del_keys()
            return value
        except KeyError:
            if not args:
                raise
            return args[0]
def popitem(self):
try:
key = self._keys[0]
self._del_keys()
except IndexError:
raise KeyError('popitem(): dictionary is empty')
else:
return key, dict.pop(self, key)
@classmethod
def fromkeys(cls, seq, value=None):
d = cls()
for key in seq:
d[key] = value
return d
if __name__ == '__main__':
def display(d):
print d
print d.keys()
print list(d.iterkeys())
print d.values()
print list(d.itervalues())
print d.items()
print list(d.iteritems())
d = SortedDict(x=24,e=5,j=4,b=2,z=26,d=4)
display(d)
print 'popitem', d.popitem()
display(d)
print 'pop j'
d.pop('j')
display(d)
d.setdefault('a', 1)
d.setdefault('g', 7)
d.setdefault('_')
display(d)
d.update({'b' : 2, 'h' : 8})
display(d)
del d['x']
display(d)
d['y'] = 26
display(d)
print `d`
print d.copy()
for k,v in d.itemrange('d', 'z', inclusive=True):
print k,v
| bsd-3-clause |
CloudServer/nova | nova/tests/functional/v3/test_extension_info.py | 18 | 2310 | # Copyright 2012 Nebula, Inc.
# Copyright 2013 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from oslo_config import cfg
from nova.api.openstack import extensions as api_extensions
from nova.tests.functional.v3 import api_sample_base
CONF = cfg.CONF
CONF.import_opt('osapi_compute_extension',
'nova.api.openstack.compute.extensions')
def fake_soft_extension_authorizer(extension_name, core=False):
def authorize(context, action=None):
return True
return authorize
class ExtensionInfoAllSamplesJsonTest(api_sample_base.ApiSampleTestBaseV3):
all_extensions = True
# TODO(park): Overriding '_api_version' till all functional tests
# are merged between v2 and v2.1. After that base class variable
# itself can be changed to 'v2'
_api_version = 'v2'
@mock.patch.object(api_extensions, 'os_compute_soft_authorizer')
def test_list_extensions(self, soft_auth):
soft_auth.side_effect = fake_soft_extension_authorizer
response = self._do_get('extensions')
subs = self._get_regexes()
template = 'extensions-list-resp'
if self._test == 'v2':
template = 'extensions-list-resp-v2'
self._verify_response(template, subs, response, 200)
class ExtensionInfoSamplesJsonTest(api_sample_base.ApiSampleTestBaseV3):
sample_dir = "extension-info"
extra_extensions_to_load = ["os-create-backup"]
@mock.patch.object(api_extensions, 'os_compute_soft_authorizer')
def test_get_extensions(self, soft_auth):
soft_auth.side_effect = fake_soft_extension_authorizer
response = self._do_get('extensions/os-create-backup')
subs = self._get_regexes()
self._verify_response('extensions-get-resp', subs, response, 200)
| apache-2.0 |
tlatzko/spmcluster | .tox/docs/lib/python2.7/site-packages/build/lib/build/lib/build/lib/build/lib/build/lib/build/lib/build/lib/tests/napoleon/test_napoleon.py | 11 | 6556 | # -*- coding: utf-8 -*-
# Copyright 2014 Rob Ruana
# Licensed under the BSD License, see LICENSE file for details.
"""Tests for :mod:`sphinxcontrib.napoleon.__init__` module."""
try:
# Python >=3.3
from unittest.mock import Mock
except ImportError:
from mock import Mock
from collections import namedtuple
from sphinx.application import Sphinx
from sphinxcontrib.napoleon import (_process_docstring, _skip_member, Config,
setup)
from unittest import TestCase
def _private_doc():
"""module._private_doc.DOCSTRING"""
pass
def _private_undoc():
pass
def __special_doc__():
"""module.__special_doc__.DOCSTRING"""
pass
def __special_undoc__():
pass
class SampleClass(object):
def _private_doc(self):
"""SampleClass._private_doc.DOCSTRING"""
pass
def _private_undoc(self):
pass
def __special_doc__(self):
"""SampleClass.__special_doc__.DOCSTRING"""
pass
def __special_undoc__(self):
pass
class SampleError(Exception):
def _private_doc(self):
"""SampleError._private_doc.DOCSTRING"""
pass
def _private_undoc(self):
pass
def __special_doc__(self):
"""SampleError.__special_doc__.DOCSTRING"""
pass
def __special_undoc__(self):
pass
SampleNamedTuple = namedtuple('SampleNamedTuple', 'user_id block_type def_id')
class ProcessDocstringTest(TestCase):
def test_modify_in_place(self):
lines = ['Summary line.',
'',
'Args:',
' arg1: arg1 description']
app = Mock()
app.config = Config()
_process_docstring(app, 'class', 'SampleClass', SampleClass, Mock(),
lines)
expected = ['Summary line.',
'',
':param arg1: arg1 description',
'']
self.assertEqual(expected, lines)
class SetupTest(TestCase):
def test_unknown_app_type(self):
setup(object())
def test_add_config_values(self):
app = Mock(Sphinx)
setup(app)
for name, (default, rebuild) in Config._config_values.items():
has_config = False
for method_name, args, kwargs in app.method_calls:
if(method_name == 'add_config_value' and
args[0] == name):
has_config = True
if not has_config:
self.fail('Config value was not added to app %s' % name)
has_process_docstring = False
has_skip_member = False
for method_name, args, kwargs in app.method_calls:
if method_name == 'connect':
if(args[0] == 'autodoc-process-docstring' and
args[1] == _process_docstring):
has_process_docstring = True
elif(args[0] == 'autodoc-skip-member' and
args[1] == _skip_member):
has_skip_member = True
if not has_process_docstring:
self.fail('autodoc-process-docstring never connected')
if not has_skip_member:
self.fail('autodoc-skip-member never connected')
class SkipMemberTest(TestCase):
def assertSkip(self, what, member, obj, expect_skip, config_name):
skip = 'default skip'
app = Mock()
app.config = Config()
setattr(app.config, config_name, True)
if expect_skip:
self.assertEqual(skip, _skip_member(app, what, member, obj, skip,
Mock()))
else:
self.assertFalse(_skip_member(app, what, member, obj, skip,
Mock()))
setattr(app.config, config_name, False)
self.assertEqual(skip, _skip_member(app, what, member, obj, skip,
Mock()))
def test_namedtuple(self):
self.assertSkip('class', '_asdict',
SampleNamedTuple._asdict, False,
'napoleon_include_private_with_doc')
def test_class_private_doc(self):
self.assertSkip('class', '_private_doc',
SampleClass._private_doc, False,
'napoleon_include_private_with_doc')
def test_class_private_undoc(self):
self.assertSkip('class', '_private_undoc',
SampleClass._private_undoc, True,
'napoleon_include_private_with_doc')
def test_class_special_doc(self):
self.assertSkip('class', '__special_doc__',
SampleClass.__special_doc__, False,
'napoleon_include_special_with_doc')
def test_class_special_undoc(self):
self.assertSkip('class', '__special_undoc__',
SampleClass.__special_undoc__, True,
'napoleon_include_special_with_doc')
def test_exception_private_doc(self):
self.assertSkip('exception', '_private_doc',
SampleError._private_doc, False,
'napoleon_include_private_with_doc')
def test_exception_private_undoc(self):
self.assertSkip('exception', '_private_undoc',
SampleError._private_undoc, True,
'napoleon_include_private_with_doc')
def test_exception_special_doc(self):
self.assertSkip('exception', '__special_doc__',
SampleError.__special_doc__, False,
'napoleon_include_special_with_doc')
def test_exception_special_undoc(self):
self.assertSkip('exception', '__special_undoc__',
SampleError.__special_undoc__, True,
'napoleon_include_special_with_doc')
def test_module_private_doc(self):
self.assertSkip('module', '_private_doc', _private_doc, False,
'napoleon_include_private_with_doc')
def test_module_private_undoc(self):
self.assertSkip('module', '_private_undoc', _private_undoc, True,
'napoleon_include_private_with_doc')
def test_module_special_doc(self):
self.assertSkip('module', '__special_doc__', __special_doc__, False,
'napoleon_include_special_with_doc')
def test_module_special_undoc(self):
self.assertSkip('module', '__special_undoc__', __special_undoc__, True,
'napoleon_include_special_with_doc')
| bsd-2-clause |
lyarwood/sosreport | sos/plugins/rhui.py | 12 | 1382 | # This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
from sos.plugins import Plugin, RedHatPlugin
class Rhui(Plugin, RedHatPlugin):
"""Red Hat Update Infrastructure
"""
plugin_name = 'rhui'
profiles = ('sysmgmt',)
rhui_debug_path = "/usr/share/rh-rhua/rhui-debug.py"
packages = ["rh-rhui-tools"]
files = [rhui_debug_path]
def setup(self):
if self.is_installed("pulp-cds"):
cds = "--cds"
else:
cds = ""
rhui_debug_dst_path = self.get_cmd_output_path()
self.add_cmd_output(
"python %s %s --dir %s"
% (self.rhui_debug_path, cds, rhui_debug_dst_path),
suggest_filename="rhui-debug")
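        # Illustratively, on a host with pulp-cds installed this collects the
        # output of:
        #   python /usr/share/rh-rhua/rhui-debug.py --cds --dir <command output dir>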
return
# vim: set et ts=4 sw=4 :
| gpl-2.0 |