from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import (
assert_equal,
assert_raises_jsonrpc,
start_nodes,
)
class NamedArgumentTest(BitcoinTestFramework):
"""
Test named arguments on RPC calls.
"""
def __init__(self):
super().__init__()
self.setup_clean_chain = False
self.num_nodes = 1
def setup_network(self, split=False):
self.nodes = start_nodes(self.num_nodes, self.options.tmpdir)
self.is_network_split = False
self.sync_all()
    def run_test(self):
        node = self.nodes[0]
        # 'help' accepts its argument by name
        h = node.help(command='getinfo')
        assert(h.startswith('getinfo\n'))
        # Unknown named parameters are rejected
        assert_raises_jsonrpc(-8, 'Unknown named parameter', node.help, random='getinfo')
        h = node.getblockhash(height=0)
        node.getblock(blockhash=h)
        # Skipped positional slots are filled with None up to the highest
        # named argument that was supplied
        assert_equal(node.echo(), [])
        assert_equal(node.echo(arg0=0, arg9=9), [0] + [None]*8 + [9])
        assert_equal(node.echo(arg1=1), [None, 1])
        assert_equal(node.echo(arg9=None), [None]*10)
        assert_equal(node.echo(arg0=0, arg3=3, arg9=9), [0] + [None]*2 + [3] + [None]*5 + [9])
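# A minimal sketch (not Bitcoin Core's server-side code) of how named RPC
# parameters can be mapped onto a positional argument list, padding skipped
# positions with None: this is the behaviour the echo() assertions above
# exercise.
def _named_to_positional(named_args, param_order):
    positions = {name: i for i, name in enumerate(param_order)}
    highest = max((positions[name] for name in named_args), default=-1)
    result = [None] * (highest + 1)
    for name, value in named_args.items():
        result[positions[name]] = value
    return result

assert _named_to_positional({}, ['arg0']) == []
assert _named_to_positional({'arg0': 0, 'arg9': 9},
                            ['arg%d' % i for i in range(10)]) == [0] + [None]*8 + [9]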
if __name__ == '__main__':
NamedArgumentTest().main()
| {
"content_hash": "01836b9821b8dd5e63ce7d5385ed50e6",
"timestamp": "",
"source": "github",
"line_count": 41,
"max_line_length": 92,
"avg_line_length": 29.926829268292682,
"alnum_prop": 0.6030969845150774,
"repo_name": "Cocosoft/bitcoin",
"id": "f9a40955c06c2b4411d34d23d001f58f703e5b76",
"size": "1437",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "qa/rpc-tests/rpcnamedargs.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Assembly",
"bytes": "28456"
},
{
"name": "C",
"bytes": "693313"
},
{
"name": "C++",
"bytes": "5024468"
},
{
"name": "CSS",
"bytes": "1127"
},
{
"name": "HTML",
"bytes": "50622"
},
{
"name": "Java",
"bytes": "30290"
},
{
"name": "M4",
"bytes": "186416"
},
{
"name": "Makefile",
"bytes": "109222"
},
{
"name": "Objective-C",
"bytes": "3892"
},
{
"name": "Objective-C++",
"bytes": "7240"
},
{
"name": "Protocol Buffer",
"bytes": "2328"
},
{
"name": "Python",
"bytes": "1142361"
},
{
"name": "QMake",
"bytes": "756"
},
{
"name": "Shell",
"bytes": "48894"
}
],
"symlink_target": ""
} |
import os
import subprocess
import sys
def data_path(path):
return os.path.join(os.path.dirname(globals()["__file__"]), 'data', path)
def fuzz_data_path(path):
return os.path.join(os.path.dirname(globals()["__file__"]), 'fuzz_data', path)
def run_command(args, verbose=False):
    """Runs the command and returns the exit status and the output."""
    if verbose:
        sys.stderr.write("Running: %s\n" % args)
    p = subprocess.Popen(args, stdin=subprocess.PIPE, stdout=subprocess.PIPE)
    stdin, stdout = (p.stdin, p.stdout)
    output = stdout.read()
    output = output.strip(b'\n')
    stdin.close()
stdout.close()
p.wait()
return p.returncode, output
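# Illustrative usage of the helpers above (not part of the original module);
# assumes a POSIX system where /bin/echo exists, and the file names passed to
# data_path()/fuzz_data_path() are placeholders.
if __name__ == '__main__':
    status, out = run_command(['/bin/echo', 'hello'], verbose=True)
    assert status == 0 and out == b'hello'
    print(data_path('example.plist'))   # <this dir>/data/example.plist
    print(fuzz_data_path('sample'))     # <this dir>/fuzz_data/sample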
| {
"content_hash": "800f3746d27164e342cd8be7ce2bfe10",
"timestamp": "",
"source": "github",
"line_count": 22,
"max_line_length": 82,
"avg_line_length": 31.5,
"alnum_prop": 0.6536796536796536,
"repo_name": "wooster/biplist",
"id": "fbc22e7faab8dd333349bf03972d14cde7ca8c9f",
"size": "718",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "tests/test_utils.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "69496"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import
import six
from systrace.tracing_agents import atrace_agent
from telemetry.core import exceptions
from telemetry.internal.platform import tracing_agent
from tracing.trace_data import trace_data
from devil.android.sdk import version_codes
class AtraceTracingAgent(tracing_agent.TracingAgent):
def __init__(self, platform_backend, config):
super().__init__(platform_backend, config)
self._device = platform_backend.device
self._categories = None
self._atrace_agent = atrace_agent.AtraceAgent(
platform_backend.device.build_version_sdk,
platform_backend.device.tracing_path)
self._config = None
@classmethod
def IsSupported(cls, platform_backend):
return (platform_backend.GetOSName() == 'android' and
platform_backend.device.build_version_sdk >
version_codes.JELLY_BEAN_MR1)
def StartAgentTracing(self, config, timeout):
if not config.enable_atrace_trace:
return False
app_name = (','.join(config.atrace_config.app_name) if isinstance(
config.atrace_config.app_name, list) else config.atrace_config.app_name)
self._config = atrace_agent.AtraceConfig(
config.atrace_config.categories,
trace_buf_size=None, kfuncs=None, app_name=app_name,
compress_trace_data=True, from_file=True,
device_serial_number=str(self._device), trace_time=None,
target='android')
return self._atrace_agent.StartAgentTracing(self._config, timeout)
def StopAgentTracing(self):
self._atrace_agent.StopAgentTracing()
def SupportsExplicitClockSync(self):
return self._atrace_agent.SupportsExplicitClockSync()
  def RecordClockSyncMarker(self, sync_id,
                            record_controller_clocksync_marker_callback):
    # The underlying atrace agent and the telemetry callback take the
    # (timestamp, sync_id) pair in opposite order, hence the reordering
    # lambda below.
    return self._atrace_agent.RecordClockSyncMarker(
        sync_id, lambda t, sid: record_controller_clocksync_marker_callback(
            sid, t))
  def CollectAgentTraceData(self, trace_data_builder, timeout=None):
    results = self._atrace_agent.GetResults(timeout)
    # GetResults() signals a retrieval timeout by returning False instead of
    # raising, so translate that into a telemetry-level error here.
    if results is False:
      raise exceptions.AtraceTracingError(
          'Timed out retrieving the atrace tracing data from device %s.'
          % self._device)
trace_data_builder.AddTraceFor(
trace_data.ATRACE_PART, six.ensure_str(results.raw_data),
allow_unstructured=True)
| {
"content_hash": "8156af31cc83cee24f565757296eb9c6",
"timestamp": "",
"source": "github",
"line_count": 62,
"max_line_length": 80,
"avg_line_length": 38.25806451612903,
"alnum_prop": 0.709106239460371,
"repo_name": "catapult-project/catapult",
"id": "1a2a0038d8a30ed8d7ea425989bf811c11422cbd",
"size": "2535",
"binary": false,
"copies": "6",
"ref": "refs/heads/main",
"path": "telemetry/telemetry/internal/platform/tracing_agent/atrace_tracing_agent.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "1324"
},
{
"name": "C++",
"bytes": "46069"
},
{
"name": "CSS",
"bytes": "23376"
},
{
"name": "Dockerfile",
"bytes": "1541"
},
{
"name": "Go",
"bytes": "114396"
},
{
"name": "HTML",
"bytes": "12394298"
},
{
"name": "JavaScript",
"bytes": "1559584"
},
{
"name": "Makefile",
"bytes": "1774"
},
{
"name": "Python",
"bytes": "6778695"
},
{
"name": "Shell",
"bytes": "2288"
}
],
"symlink_target": ""
} |
try:
    file = open('eeee', 'r+')
except Exception as e:
    # The file does not exist yet, so offer to create it.
    print('there is no file named as eeee')
    response = input('do you want to create a new file? (y/n): ')
    if response == 'y':
        file = open('eeee', 'w')
        file.close()
    else:
        pass
else:
    # The open() succeeded, so the file already exists.
    file.write('ssss')
    file.close()
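# An equivalent approach using os.path.exists and `with` blocks, which close
# the file automatically. It is wrapped in a function (and never called) so
# that running this tutorial script still behaves exactly as above;
# illustrative only.
import os
def write_or_create():
    if os.path.exists('eeee'):
        with open('eeee', 'r+') as f:
            f.write('ssss')
    elif input('do you want to create a new file? (y/n): ') == 'y':
        with open('eeee', 'w'):
            pass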
| {
"content_hash": "18bc90dc71aa5bfbbf0715967b582902",
"timestamp": "",
"source": "github",
"line_count": 14,
"max_line_length": 56,
"avg_line_length": 20.142857142857142,
"alnum_prop": 0.5673758865248227,
"repo_name": "tencrance/cool-config",
"id": "906630efea851929998132c79c870a5e9c3f0d77",
"size": "497",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "ml_keras_learn/tutorials/basic/28_try.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "4434"
},
{
"name": "Shell",
"bytes": "1564"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import, print_function
from uwsgi import cache_update, cache_get, cache_del, mule_msg, mule_get_msg
from . import offload
from .base import BaseCache, SERIALIZERS
dumps, loads = SERIALIZERS['pickle']
class UWSGICache(BaseCache):
def __init__(self, name):
self.name = name
def get(self, key):
return cache_get(key, self.name)
def mget(self, keys):
name = self.name
return [cache_get(key, name) for key in keys]
def set(self, key, value, ttl):
cache_update(key, value, ttl, self.name)
def delete(self, key):
cache_del(key, self.name)
def offloader(mule=None):
    # Pickle the offload parameters and hand them to a uWSGI mule, so the
    # actual work happens outside the request-handling worker.
    @offload.offloader
    def do_offload(params):
        mule_msg(dumps(params), mule)
    return do_offload
def offload_worker(offload_cache):
    # Intended to run inside a mule: block on mule_get_msg(), unpickle the
    # payload and let the cache perform the offloaded work.
    def worker():
        print('Offload worker started')
        while True:
            payload = mule_get_msg()
            params = loads(payload)
            offload_cache.offload_helper(params)
    return worker
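# Minimal usage sketch of UWSGICache (illustrative, never called). It only
# works inside a uWSGI process with a matching cache configured, e.g. one
# started with `uwsgi --cache2 name=mycache,items=100 ...`.
def _example_usage():
    cache = UWSGICache('mycache')
    cache.set('greeting', b'hello', ttl=60)
    assert cache.get('greeting') == b'hello'
    assert cache.mget(['greeting', 'missing']) == [b'hello', None]
    cache.delete('greeting')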
| {
"content_hash": "0d591702f412cc338521ce59baa45ff6",
"timestamp": "",
"source": "github",
"line_count": 43,
"max_line_length": 76,
"avg_line_length": 24.069767441860463,
"alnum_prop": 0.6328502415458938,
"repo_name": "baverman/cachel",
"id": "7d06c3dac12513fb326c8c13005059233f1d5b16",
"size": "1035",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "cachel/uwsgi.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "52503"
}
],
"symlink_target": ""
} |
"""
This file convert dataset from http://mmlab.ie.cuhk.edu.hk/archive/CNN_FacePoint.htm
We convert data for LEVEL-3 training data.
all data are formated as (patch, delta landmark), and delta landmark is ((x1, y1), (x2, y2)...)
"""
import os
from os.path import join, exists
import time
from collections import defaultdict
import cv2
import numpy as np
import h5py
from common import logger, createDir, getDataFromTxt, getPatch, processImage
from common import shuffle_in_unison_scary
from utils import randomShift, randomShiftWithArgument
types = [(0, 'LE1', 0.11),
(0, 'LE2', 0.12),
(1, 'RE1', 0.11),
(1, 'RE2', 0.12),
(2, 'N1', 0.11),
(2, 'N2', 0.12),
(3, 'LM1', 0.11),
(3, 'LM2', 0.12),
(4, 'RM1', 0.11),
(4, 'RM2', 0.12),]
for t in types:
d = 'train/3_%s' % t[1]
createDir(d)
def generate(ftxt, mode, argument=False):
"""
Generate Training Data for LEVEL-3
mode = train or test
"""
data = getDataFromTxt(ftxt)
trainData = defaultdict(lambda: dict(patches=[], landmarks=[]))
for (imgPath, bbox, landmarkGt) in data:
img = cv2.imread(imgPath, cv2.CV_LOAD_IMAGE_GRAYSCALE)
assert(img is not None)
logger("process %s" % imgPath)
landmarkPs = randomShiftWithArgument(landmarkGt, 0.01)
if not argument:
landmarkPs = [landmarkPs[0]]
for landmarkP in landmarkPs:
for idx, name, padding in types:
patch, patch_bbox = getPatch(img, bbox, landmarkP[idx], padding)
patch = cv2.resize(patch, (15, 15))
patch = patch.reshape((1, 15, 15))
trainData[name]['patches'].append(patch)
                # Project the ground-truth landmark into the patch's own
                # coordinate system before storing it as the regression target.
                _ = patch_bbox.project(bbox.reproject(landmarkGt[idx]))
                trainData[name]['landmarks'].append(_)
for idx, name, padding in types:
logger('writing training data of %s'%name)
patches = np.asarray(trainData[name]['patches'])
landmarks = np.asarray(trainData[name]['landmarks'])
patches = processImage(patches)
shuffle_in_unison_scary(patches, landmarks)
with h5py.File('train/3_%s/%s.h5'%(name, mode), 'w') as h5:
h5['data'] = patches.astype(np.float32)
h5['landmark'] = landmarks.astype(np.float32)
with open('train/3_%s/%s.txt'%(name, mode), 'w') as fd:
fd.write('train/3_%s/%s.h5'%(name, mode))
if __name__ == '__main__':
np.random.seed(int(time.time()))
# trainImageList.txt
generate('dataset/train/trainImageList.txt', 'train', argument=True)
# testImageList.txt
generate('dataset/train/testImageList.txt', 'test')
# Done
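# Quick sanity check of one generated file (illustrative, never called): the
# path assumes generate() has been run for the 'LE1' patch type.
def _inspect_level3_h5(path='train/3_LE1/train.h5'):
    with h5py.File(path, 'r') as h5:
        data = h5['data'][:]          # (N, 1, 15, 15) float32 patches
        landmark = h5['landmark'][:]  # (N, 2) float32 landmark in patch coords
        print(data.shape, landmark.shape)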
| {
"content_hash": "7144c45a27aad593f9b4751f4f035778",
"timestamp": "",
"source": "github",
"line_count": 80,
"max_line_length": 99,
"avg_line_length": 34.1125,
"alnum_prop": 0.5932576035177721,
"repo_name": "luoyetx/deep-landmark",
"id": "4c767c8276ccdb89a8d4291c760b85fba7f9d92a",
"size": "2770",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "dataset/level3.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "HTML",
"bytes": "2656"
},
{
"name": "JavaScript",
"bytes": "484"
},
{
"name": "Python",
"bytes": "47565"
},
{
"name": "Shell",
"bytes": "1043"
}
],
"symlink_target": ""
} |
"""
Tests for the API /portgroups/ methods.
"""
import datetime
import mock
from oslo_config import cfg
from oslo_utils import timeutils
from oslo_utils import uuidutils
import six
from six.moves import http_client
from six.moves.urllib import parse as urlparse
from testtools.matchers import HasLength
from wsme import types as wtypes
from ironic.api.controllers import base as api_base
from ironic.api.controllers import v1 as api_v1
from ironic.api.controllers.v1 import notification_utils
from ironic.api.controllers.v1 import portgroup as api_portgroup
from ironic.api.controllers.v1 import utils as api_utils
from ironic.common import exception
from ironic.common import utils as common_utils
from ironic.conductor import rpcapi
from ironic import objects
from ironic.objects import fields as obj_fields
from ironic.tests import base
from ironic.tests.unit.api import base as test_api_base
from ironic.tests.unit.api import utils as apiutils
from ironic.tests.unit.objects import utils as obj_utils
class TestPortgroupObject(base.TestCase):
def test_portgroup_init(self):
portgroup_dict = apiutils.portgroup_post_data(node_id=None)
del portgroup_dict['extra']
portgroup = api_portgroup.Portgroup(**portgroup_dict)
self.assertEqual(wtypes.Unset, portgroup.extra)
class TestListPortgroups(test_api_base.BaseApiTest):
headers = {api_base.Version.string: str(api_v1.MAX_VER)}
def setUp(self):
super(TestListPortgroups, self).setUp()
self.node = obj_utils.create_test_node(self.context)
def test_empty(self):
data = self.get_json('/portgroups', headers=self.headers)
self.assertEqual([], data['portgroups'])
def test_one(self):
portgroup = obj_utils.create_test_portgroup(self.context,
node_id=self.node.id)
data = self.get_json('/portgroups', headers=self.headers)
self.assertEqual(portgroup.uuid, data['portgroups'][0]["uuid"])
self.assertEqual(portgroup.address, data['portgroups'][0]["address"])
self.assertEqual(portgroup.name, data['portgroups'][0]['name'])
self.assertNotIn('extra', data['portgroups'][0])
self.assertNotIn('node_uuid', data['portgroups'][0])
# never expose the node_id
self.assertNotIn('node_id', data['portgroups'][0])
self.assertNotIn('standalone_ports_supported', data['portgroups'][0])
def test_get_one(self):
portgroup = obj_utils.create_test_portgroup(self.context,
node_id=self.node.id)
data = self.get_json('/portgroups/%s' % portgroup.uuid,
headers=self.headers)
self.assertEqual(portgroup.uuid, data['uuid'])
self.assertIn('extra', data)
self.assertIn('node_uuid', data)
self.assertIn('standalone_ports_supported', data)
# never expose the node_id
self.assertNotIn('node_id', data)
def test_get_one_custom_fields(self):
portgroup = obj_utils.create_test_portgroup(self.context,
node_id=self.node.id)
fields = 'address,extra'
data = self.get_json(
'/portgroups/%s?fields=%s' % (portgroup.uuid, fields),
headers=self.headers)
# We always append "links"
self.assertItemsEqual(['address', 'extra', 'links'], data)
def test_get_one_mode_field_lower_api_version(self):
portgroup = obj_utils.create_test_portgroup(self.context,
node_id=self.node.id)
headers = {api_base.Version.string: '1.25'}
fields = 'address,mode'
response = self.get_json(
'/portgroups/%s?fields=%s' % (portgroup.uuid, fields),
headers=headers, expect_errors=True)
self.assertEqual(http_client.NOT_ACCEPTABLE, response.status_int)
self.assertEqual('application/json', response.content_type)
def test_get_collection_custom_fields(self):
fields = 'uuid,extra'
for i in range(3):
obj_utils.create_test_portgroup(
self.context,
node_id=self.node.id,
uuid=uuidutils.generate_uuid(),
name='portgroup%s' % i,
address='52:54:00:cf:2d:3%s' % i)
data = self.get_json(
'/portgroups?fields=%s' % fields,
headers=self.headers)
self.assertEqual(3, len(data['portgroups']))
for portgroup in data['portgroups']:
# We always append "links"
self.assertItemsEqual(['uuid', 'extra', 'links'], portgroup)
def test_get_collection_properties_field_lower_api_version(self):
obj_utils.create_test_portgroup(self.context, node_id=self.node.id)
headers = {api_base.Version.string: '1.25'}
fields = 'address,properties'
response = self.get_json(
'/portgroups/?fields=%s' % fields,
headers=headers, expect_errors=True)
self.assertEqual(http_client.NOT_ACCEPTABLE, response.status_int)
self.assertEqual('application/json', response.content_type)
def test_get_custom_fields_invalid_fields(self):
portgroup = obj_utils.create_test_portgroup(self.context,
node_id=self.node.id)
fields = 'uuid,spongebob'
response = self.get_json(
'/portgroups/%s?fields=%s' % (portgroup.uuid, fields),
headers=self.headers, expect_errors=True)
self.assertEqual(http_client.BAD_REQUEST, response.status_int)
self.assertEqual('application/json', response.content_type)
self.assertIn('spongebob', response.json['error_message'])
def test_get_one_invalid_api_version(self):
portgroup = obj_utils.create_test_portgroup(self.context,
node_id=self.node.id)
response = self.get_json(
'/portgroups/%s' % (portgroup.uuid),
headers={api_base.Version.string: str(api_v1.MIN_VER)},
expect_errors=True)
self.assertEqual(http_client.NOT_FOUND, response.status_int)
def test_detail(self):
portgroup = obj_utils.create_test_portgroup(self.context,
node_id=self.node.id)
data = self.get_json('/portgroups/detail', headers=self.headers)
self.assertEqual(portgroup.uuid, data['portgroups'][0]["uuid"])
self.assertIn('extra', data['portgroups'][0])
self.assertIn('node_uuid', data['portgroups'][0])
self.assertIn('standalone_ports_supported', data['portgroups'][0])
# never expose the node_id
self.assertNotIn('node_id', data['portgroups'][0])
def test_detail_invalid_api_version(self):
response = self.get_json(
'/portgroups/detail',
headers={api_base.Version.string: str(api_v1.MIN_VER)},
expect_errors=True)
self.assertEqual(http_client.NOT_FOUND, response.status_int)
def test_detail_against_single(self):
portgroup = obj_utils.create_test_portgroup(self.context,
node_id=self.node.id)
response = self.get_json('/portgroups/%s/detail' % portgroup.uuid,
expect_errors=True, headers=self.headers)
self.assertEqual(http_client.NOT_FOUND, response.status_int)
def test_many(self):
portgroups = []
for id_ in range(5):
portgroup = obj_utils.create_test_portgroup(
self.context, node_id=self.node.id,
uuid=uuidutils.generate_uuid(),
name='portgroup%s' % id_,
address='52:54:00:cf:2d:3%s' % id_)
portgroups.append(portgroup.uuid)
data = self.get_json('/portgroups', headers=self.headers)
self.assertEqual(len(portgroups), len(data['portgroups']))
uuids = [n['uuid'] for n in data['portgroups']]
six.assertCountEqual(self, portgroups, uuids)
def test_links(self):
uuid = uuidutils.generate_uuid()
obj_utils.create_test_portgroup(self.context,
uuid=uuid,
node_id=self.node.id)
data = self.get_json('/portgroups/%s' % uuid, headers=self.headers)
self.assertIn('links', data)
self.assertIn('ports', data)
self.assertEqual(2, len(data['links']))
self.assertIn(uuid, data['links'][0]['href'])
for l in data['links']:
bookmark = l['rel'] == 'bookmark'
self.assertTrue(self.validate_link(l['href'], bookmark=bookmark,
headers=self.headers))
def test_collection_links(self):
portgroups = []
for id_ in range(5):
portgroup = obj_utils.create_test_portgroup(
self.context,
node_id=self.node.id,
uuid=uuidutils.generate_uuid(),
name='portgroup%s' % id_,
address='52:54:00:cf:2d:3%s' % id_)
portgroups.append(portgroup.uuid)
data = self.get_json('/portgroups/?limit=3', headers=self.headers)
self.assertEqual(3, len(data['portgroups']))
next_marker = data['portgroups'][-1]['uuid']
self.assertIn(next_marker, data['next'])
def test_collection_links_default_limit(self):
cfg.CONF.set_override('max_limit', 3, 'api')
portgroups = []
for id_ in range(5):
portgroup = obj_utils.create_test_portgroup(
self.context,
node_id=self.node.id,
uuid=uuidutils.generate_uuid(),
name='portgroup%s' % id_,
address='52:54:00:cf:2d:3%s' % id_)
portgroups.append(portgroup.uuid)
data = self.get_json('/portgroups', headers=self.headers)
self.assertEqual(3, len(data['portgroups']))
next_marker = data['portgroups'][-1]['uuid']
self.assertIn(next_marker, data['next'])
def test_ports_subresource(self):
pg = obj_utils.create_test_portgroup(self.context,
uuid=uuidutils.generate_uuid(),
node_id=self.node.id)
for id_ in range(2):
obj_utils.create_test_port(self.context, node_id=self.node.id,
uuid=uuidutils.generate_uuid(),
portgroup_id=pg.id,
address='52:54:00:cf:2d:3%s' % id_)
data = self.get_json('/portgroups/%s/ports' % pg.uuid,
headers=self.headers)
self.assertEqual(2, len(data['ports']))
self.assertNotIn('next', data.keys())
data = self.get_json('/portgroups/%s/ports/detail' % pg.uuid,
headers=self.headers)
self.assertEqual(2, len(data['ports']))
self.assertNotIn('next', data.keys())
# Test collection pagination
data = self.get_json('/portgroups/%s/ports?limit=1' % pg.uuid,
headers=self.headers)
self.assertEqual(1, len(data['ports']))
self.assertIn('next', data.keys())
        # Test GET with an old API version: the /portgroups controller is
        # not exposed, so the request 404s.
response = self.get_json('/portgroups/%s/ports/%s' % (
pg.uuid, uuidutils.generate_uuid()),
headers={api_base.Version.string: str(api_v1.MIN_VER)},
expect_errors=True)
self.assertEqual(http_client.NOT_FOUND, response.status_int)
        # Test that direct access to /portgroups/<uuid>/ports/<uuid> is not
        # allowed even with the latest API version.
response = self.get_json(
'/portgroups/%s/ports/%s' % (pg.uuid, uuidutils.generate_uuid()),
headers={api_base.Version.string: str(api_v1.MAX_VER)},
expect_errors=True)
self.assertEqual(http_client.FORBIDDEN, response.status_int)
def test_ports_subresource_no_portgroups_allowed(self):
pg = obj_utils.create_test_portgroup(self.context,
uuid=uuidutils.generate_uuid(),
node_id=self.node.id)
for id_ in range(2):
obj_utils.create_test_port(self.context, node_id=self.node.id,
uuid=uuidutils.generate_uuid(),
portgroup_id=pg.id,
address='52:54:00:cf:2d:3%s' % id_)
response = self.get_json('/portgroups/%s/ports' % pg.uuid,
expect_errors=True)
self.assertEqual(http_client.NOT_FOUND, response.status_int)
self.assertEqual('application/json', response.content_type)
def test_get_all_ports_by_portgroup_uuid(self):
pg = obj_utils.create_test_portgroup(self.context,
node_id=self.node.id)
port = obj_utils.create_test_port(self.context, node_id=self.node.id,
portgroup_id=pg.id)
data = self.get_json('/portgroups/%s/ports' % pg.uuid,
headers={api_base.Version.string: '1.24'})
self.assertEqual(port.uuid, data['ports'][0]['uuid'])
def test_ports_subresource_not_allowed(self):
pg = obj_utils.create_test_portgroup(self.context,
node_id=self.node.id)
response = self.get_json('/portgroups/%s/ports' % pg.uuid,
expect_errors=True,
headers={api_base.Version.string: '1.23'})
self.assertEqual(http_client.NOT_FOUND, response.status_int)
self.assertIn('The resource could not be found.',
response.json['error_message'])
def test_ports_subresource_portgroup_not_found(self):
non_existent_uuid = 'eeeeeeee-cccc-aaaa-bbbb-cccccccccccc'
response = self.get_json('/portgroups/%s/ports' % non_existent_uuid,
expect_errors=True, headers=self.headers)
self.assertEqual(http_client.NOT_FOUND, response.status_int)
self.assertIn('Portgroup %s could not be found.' % non_existent_uuid,
response.json['error_message'])
def test_portgroup_by_address(self):
address_template = "aa:bb:cc:dd:ee:f%d"
for id_ in range(3):
obj_utils.create_test_portgroup(
self.context,
node_id=self.node.id,
uuid=uuidutils.generate_uuid(),
name='portgroup%s' % id_,
address=address_template % id_)
target_address = address_template % 1
data = self.get_json('/portgroups?address=%s' % target_address,
headers=self.headers)
self.assertThat(data['portgroups'], HasLength(1))
self.assertEqual(target_address, data['portgroups'][0]['address'])
def test_portgroup_get_all_invalid_api_version(self):
obj_utils.create_test_portgroup(
self.context, node_id=self.node.id, uuid=uuidutils.generate_uuid(),
name='portgroup_1')
response = self.get_json('/portgroups',
headers={api_base.Version.string: '1.14'},
expect_errors=True)
self.assertEqual(http_client.NOT_FOUND, response.status_int)
def test_portgroup_by_address_non_existent_address(self):
# non-existent address
data = self.get_json('/portgroups?address=%s' % 'aa:bb:cc:dd:ee:ff',
headers=self.headers)
self.assertThat(data['portgroups'], HasLength(0))
def test_portgroup_by_address_invalid_address_format(self):
obj_utils.create_test_portgroup(self.context, node_id=self.node.id)
invalid_address = 'invalid-mac-format'
response = self.get_json('/portgroups?address=%s' % invalid_address,
expect_errors=True, headers=self.headers)
self.assertEqual(http_client.BAD_REQUEST, response.status_int)
self.assertEqual('application/json', response.content_type)
self.assertIn(invalid_address, response.json['error_message'])
def test_sort_key(self):
portgroups = []
for id_ in range(3):
portgroup = obj_utils.create_test_portgroup(
self.context,
node_id=self.node.id,
uuid=uuidutils.generate_uuid(),
name='portgroup%s' % id_,
address='52:54:00:cf:2d:3%s' % id_)
portgroups.append(portgroup.uuid)
data = self.get_json('/portgroups?sort_key=uuid', headers=self.headers)
uuids = [n['uuid'] for n in data['portgroups']]
self.assertEqual(sorted(portgroups), uuids)
def test_sort_key_invalid(self):
invalid_keys_list = ['foo', 'extra', 'internal_info', 'properties']
for invalid_key in invalid_keys_list:
response = self.get_json('/portgroups?sort_key=%s' % invalid_key,
expect_errors=True, headers=self.headers)
self.assertEqual(http_client.BAD_REQUEST, response.status_int)
self.assertEqual('application/json', response.content_type)
self.assertIn(invalid_key, response.json['error_message'])
def _test_sort_key_allowed(self, detail=False):
portgroup_uuids = []
for id_ in range(3, 0, -1):
portgroup = obj_utils.create_test_portgroup(
self.context,
node_id=self.node.id,
uuid=uuidutils.generate_uuid(),
name='portgroup%s' % id_,
address='52:54:00:cf:2d:3%s' % id_,
mode='mode_%s' % id_)
portgroup_uuids.append(portgroup.uuid)
portgroup_uuids.reverse()
detail_str = '/detail' if detail else ''
data = self.get_json('/portgroups%s?sort_key=mode' % detail_str,
headers=self.headers)
data_uuids = [p['uuid'] for p in data['portgroups']]
self.assertEqual(portgroup_uuids, data_uuids)
def test_sort_key_allowed(self):
self._test_sort_key_allowed()
def test_detail_sort_key_allowed(self):
self._test_sort_key_allowed(detail=True)
def _test_sort_key_not_allowed(self, detail=False):
headers = {api_base.Version.string: '1.25'}
detail_str = '/detail' if detail else ''
response = self.get_json('/portgroups%s?sort_key=mode' % detail_str,
headers=headers, expect_errors=True)
self.assertEqual(http_client.NOT_ACCEPTABLE, response.status_int)
self.assertEqual('application/json', response.content_type)
def test_sort_key_not_allowed(self):
self._test_sort_key_not_allowed()
def test_detail_sort_key_not_allowed(self):
self._test_sort_key_not_allowed(detail=True)
@mock.patch.object(api_utils, 'get_rpc_node')
def test_get_all_by_node_name_ok(self, mock_get_rpc_node):
# GET /v1/portgroups specifying node_name - success
mock_get_rpc_node.return_value = self.node
for i in range(5):
if i < 3:
node_id = self.node.id
else:
node_id = 100000 + i
obj_utils.create_test_portgroup(
self.context,
node_id=node_id,
uuid=uuidutils.generate_uuid(),
name='portgroup%s' % i,
address='52:54:00:cf:2d:3%s' % i)
data = self.get_json("/portgroups?node=%s" % 'test-node',
headers=self.headers)
self.assertEqual(3, len(data['portgroups']))
@mock.patch.object(api_utils, 'get_rpc_node')
def test_get_all_by_node_uuid_ok(self, mock_get_rpc_node):
mock_get_rpc_node.return_value = self.node
obj_utils.create_test_portgroup(self.context, node_id=self.node.id)
data = self.get_json('/portgroups/detail?node=%s' % (self.node.uuid),
headers=self.headers)
mock_get_rpc_node.assert_called_once_with(self.node.uuid)
self.assertEqual(1, len(data['portgroups']))
@mock.patch.object(api_utils, 'get_rpc_node')
def test_detail_by_node_name_ok(self, mock_get_rpc_node):
# GET /v1/portgroups/detail specifying node_name - success
mock_get_rpc_node.return_value = self.node
portgroup = obj_utils.create_test_portgroup(self.context,
node_id=self.node.id)
data = self.get_json('/portgroups/detail?node=%s' % 'test-node',
headers=self.headers)
self.assertEqual(portgroup.uuid, data['portgroups'][0]['uuid'])
self.assertEqual(self.node.uuid, data['portgroups'][0]['node_uuid'])
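# For reference: example request paths exercised by TestListPortgroups above
# (illustrative, not an exhaustive list of the supported query parameters).
EXAMPLE_LIST_REQUESTS = [
    '/portgroups?fields=uuid,extra',
    '/portgroups?address=aa:bb:cc:dd:ee:f1',
    '/portgroups?node=test-node',
    '/portgroups?sort_key=uuid',
    '/portgroups?limit=3',
    '/portgroups/detail',
]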
@mock.patch.object(rpcapi.ConductorAPI, 'update_portgroup')
class TestPatch(test_api_base.BaseApiTest):
headers = {api_base.Version.string: str(api_v1.MAX_VER)}
def setUp(self):
super(TestPatch, self).setUp()
self.node = obj_utils.create_test_node(self.context)
self.portgroup = obj_utils.create_test_portgroup(self.context,
node_id=self.node.id)
p = mock.patch.object(rpcapi.ConductorAPI, 'get_topic_for')
self.mock_gtf = p.start()
self.mock_gtf.return_value = 'test-topic'
self.addCleanup(p.stop)
@mock.patch.object(notification_utils, '_emit_api_notification')
def test_update_byid(self, mock_notify, mock_upd):
extra = {'foo': 'bar'}
mock_upd.return_value = self.portgroup
mock_upd.return_value.extra = extra
response = self.patch_json('/portgroups/%s' % self.portgroup.uuid,
[{'path': '/extra/foo',
'value': 'bar',
'op': 'add'}],
headers=self.headers)
self.assertEqual('application/json', response.content_type)
self.assertEqual(http_client.OK, response.status_code)
self.assertEqual(extra, response.json['extra'])
kargs = mock_upd.call_args[0][1]
self.assertEqual(extra, kargs.extra)
mock_notify.assert_has_calls([mock.call(mock.ANY, mock.ANY, 'update',
obj_fields.NotificationLevel.INFO,
obj_fields.NotificationStatus.START,
node_uuid=self.node.uuid),
mock.call(mock.ANY, mock.ANY, 'update',
obj_fields.NotificationLevel.INFO,
obj_fields.NotificationStatus.END,
node_uuid=self.node.uuid)])
def test_update_byname(self, mock_upd):
extra = {'foo': 'bar'}
mock_upd.return_value = self.portgroup
mock_upd.return_value.extra = extra
response = self.patch_json('/portgroups/%s' % self.portgroup.name,
[{'path': '/extra/foo',
'value': 'bar',
'op': 'add'}],
headers=self.headers)
self.assertEqual('application/json', response.content_type)
self.assertEqual(http_client.OK, response.status_code)
self.assertEqual(extra, response.json['extra'])
def test_update_invalid_name(self, mock_upd):
mock_upd.return_value = self.portgroup
response = self.patch_json('/portgroups/%s' % self.portgroup.name,
[{'path': '/name',
'value': 'aa:bb_cc',
'op': 'replace'}],
headers=self.headers,
expect_errors=True)
self.assertEqual(http_client.BAD_REQUEST, response.status_code)
def test_update_byid_invalid_api_version(self, mock_upd):
extra = {'foo': 'bar'}
mock_upd.return_value = self.portgroup
mock_upd.return_value.extra = extra
headers = {api_base.Version.string: '1.14'}
response = self.patch_json('/portgroups/%s' % self.portgroup.uuid,
[{'path': '/extra/foo',
'value': 'bar',
'op': 'add'}],
headers=headers,
expect_errors=True)
self.assertEqual(http_client.NOT_FOUND, response.status_int)
def test_update_byaddress_not_allowed(self, mock_upd):
extra = {'foo': 'bar'}
mock_upd.return_value = self.portgroup
mock_upd.return_value.extra = extra
response = self.patch_json('/portgroups/%s' % self.portgroup.address,
[{'path': '/extra/foo',
'value': 'bar',
'op': 'add'}],
expect_errors=True,
headers=self.headers)
self.assertEqual('application/json', response.content_type)
self.assertEqual(http_client.BAD_REQUEST, response.status_int)
self.assertIn(self.portgroup.address, response.json['error_message'])
self.assertFalse(mock_upd.called)
def test_update_not_found(self, mock_upd):
uuid = uuidutils.generate_uuid()
response = self.patch_json('/portgroups/%s' % uuid,
[{'path': '/extra/foo',
'value': 'bar',
'op': 'add'}],
expect_errors=True,
headers=self.headers)
self.assertEqual('application/json', response.content_type)
self.assertEqual(http_client.NOT_FOUND, response.status_int)
self.assertTrue(response.json['error_message'])
self.assertFalse(mock_upd.called)
def test_replace_singular(self, mock_upd):
address = 'aa:bb:cc:dd:ee:ff'
mock_upd.return_value = self.portgroup
mock_upd.return_value.address = address
response = self.patch_json('/portgroups/%s' % self.portgroup.uuid,
[{'path': '/address',
'value': address,
'op': 'replace'}],
headers=self.headers)
self.assertEqual('application/json', response.content_type)
self.assertEqual(http_client.OK, response.status_code)
self.assertEqual(address, response.json['address'])
self.assertTrue(mock_upd.called)
kargs = mock_upd.call_args[0][1]
self.assertEqual(address, kargs.address)
@mock.patch.object(notification_utils, '_emit_api_notification')
def test_replace_address_already_exist(self, mock_notify, mock_upd):
address = 'aa:aa:aa:aa:aa:aa'
mock_upd.side_effect = exception.MACAlreadyExists(mac=address)
response = self.patch_json('/portgroups/%s' % self.portgroup.uuid,
[{'path': '/address',
'value': address,
'op': 'replace'}],
expect_errors=True,
headers=self.headers)
self.assertEqual('application/json', response.content_type)
self.assertEqual(http_client.CONFLICT, response.status_code)
self.assertTrue(response.json['error_message'])
self.assertTrue(mock_upd.called)
kargs = mock_upd.call_args[0][1]
self.assertEqual(address, kargs.address)
mock_notify.assert_has_calls([mock.call(mock.ANY, mock.ANY, 'update',
obj_fields.NotificationLevel.INFO,
obj_fields.NotificationStatus.START,
node_uuid=self.node.uuid),
mock.call(mock.ANY, mock.ANY, 'update',
obj_fields.NotificationLevel.ERROR,
obj_fields.NotificationStatus.ERROR,
node_uuid=self.node.uuid)])
def test_replace_node_uuid(self, mock_upd):
mock_upd.return_value = self.portgroup
response = self.patch_json('/portgroups/%s' % self.portgroup.uuid,
[{'path': '/node_uuid',
'value': self.node.uuid,
'op': 'replace'}],
headers=self.headers)
self.assertEqual('application/json', response.content_type)
self.assertEqual(http_client.OK, response.status_code)
def test_add_node_uuid(self, mock_upd):
mock_upd.return_value = self.portgroup
response = self.patch_json('/portgroups/%s' % self.portgroup.uuid,
[{'path': '/node_uuid',
'value': self.node.uuid,
'op': 'add'}],
headers=self.headers)
self.assertEqual('application/json', response.content_type)
self.assertEqual(http_client.OK, response.status_code)
def test_add_node_id(self, mock_upd):
response = self.patch_json('/portgroups/%s' % self.portgroup.uuid,
[{'path': '/node_id',
'value': '1',
'op': 'add'}],
expect_errors=True,
headers=self.headers)
self.assertEqual('application/json', response.content_type)
self.assertEqual(http_client.BAD_REQUEST, response.status_code)
self.assertFalse(mock_upd.called)
def test_replace_node_id(self, mock_upd):
response = self.patch_json('/portgroups/%s' % self.portgroup.uuid,
[{'path': '/node_id',
'value': '1',
'op': 'replace'}],
expect_errors=True,
headers=self.headers)
self.assertEqual('application/json', response.content_type)
self.assertEqual(http_client.BAD_REQUEST, response.status_code)
self.assertFalse(mock_upd.called)
def test_remove_node_id(self, mock_upd):
response = self.patch_json('/portgroups/%s' % self.portgroup.uuid,
[{'path': '/node_id',
'op': 'remove'}],
expect_errors=True,
headers=self.headers)
self.assertEqual('application/json', response.content_type)
self.assertEqual(http_client.BAD_REQUEST, response.status_code)
self.assertFalse(mock_upd.called)
def test_replace_non_existent_node_uuid(self, mock_upd):
node_uuid = '12506333-a81c-4d59-9987-889ed5f8687b'
response = self.patch_json('/portgroups/%s' % self.portgroup.uuid,
[{'path': '/node_uuid',
'value': node_uuid,
'op': 'replace'}],
expect_errors=True,
headers=self.headers)
self.assertEqual('application/json', response.content_type)
self.assertEqual(http_client.BAD_REQUEST, response.status_code)
self.assertIn(node_uuid, response.json['error_message'])
self.assertFalse(mock_upd.called)
def test_replace_multi(self, mock_upd):
extra = {"foo1": "bar1", "foo2": "bar2", "foo3": "bar3"}
self.portgroup.extra = extra
self.portgroup.save()
# mutate extra so we replace all of them
extra = dict((k, extra[k] + 'x') for k in extra.keys())
patch = []
for k in extra.keys():
patch.append({'path': '/extra/%s' % k,
'value': extra[k],
'op': 'replace'})
mock_upd.return_value = self.portgroup
mock_upd.return_value.extra = extra
response = self.patch_json('/portgroups/%s' % self.portgroup.uuid,
patch, headers=self.headers)
self.assertEqual('application/json', response.content_type)
self.assertEqual(http_client.OK, response.status_code)
self.assertEqual(extra, response.json['extra'])
kargs = mock_upd.call_args[0][1]
self.assertEqual(extra, kargs.extra)
def test_remove_multi(self, mock_upd):
extra = {"foo1": "bar1", "foo2": "bar2", "foo3": "bar3"}
self.portgroup.extra = extra
self.portgroup.save()
# Removing one item from the collection
extra.pop('foo1')
mock_upd.return_value = self.portgroup
mock_upd.return_value.extra = extra
response = self.patch_json('/portgroups/%s' % self.portgroup.uuid,
[{'path': '/extra/foo1',
'op': 'remove'}],
headers=self.headers)
self.assertEqual('application/json', response.content_type)
self.assertEqual(http_client.OK, response.status_code)
self.assertEqual(extra, response.json['extra'])
kargs = mock_upd.call_args[0][1]
self.assertEqual(extra, kargs.extra)
# Removing the collection
extra = {}
mock_upd.return_value.extra = extra
response = self.patch_json('/portgroups/%s' % self.portgroup.uuid,
[{'path': '/extra', 'op': 'remove'}],
headers=self.headers)
self.assertEqual('application/json', response.content_type)
self.assertEqual(http_client.OK, response.status_code)
self.assertEqual({}, response.json['extra'])
kargs = mock_upd.call_args[0][1]
self.assertEqual(extra, kargs.extra)
# Assert nothing else was changed
self.assertEqual(self.portgroup.uuid, response.json['uuid'])
self.assertEqual(self.portgroup.address, response.json['address'])
def test_remove_non_existent_property_fail(self, mock_upd):
response = self.patch_json('/portgroups/%s' % self.portgroup.uuid,
[{'path': '/extra/non-existent',
'op': 'remove'}],
expect_errors=True,
headers=self.headers)
self.assertEqual('application/json', response.content_type)
self.assertEqual(http_client.BAD_REQUEST, response.status_code)
self.assertTrue(response.json['error_message'])
self.assertFalse(mock_upd.called)
def test_remove_address(self, mock_upd):
mock_upd.return_value = self.portgroup
mock_upd.return_value.address = None
response = self.patch_json('/portgroups/%s' % self.portgroup.uuid,
[{'path': '/address',
'op': 'remove'}],
headers=self.headers)
self.assertEqual('application/json', response.content_type)
self.assertEqual(http_client.OK, response.status_code)
self.assertIsNone(response.json['address'])
self.assertTrue(mock_upd.called)
def test_add_root(self, mock_upd):
address = 'aa:bb:cc:dd:ee:ff'
mock_upd.return_value = self.portgroup
mock_upd.return_value.address = address
response = self.patch_json('/portgroups/%s' % self.portgroup.uuid,
[{'path': '/address',
'value': address,
'op': 'add'}],
headers=self.headers)
self.assertEqual('application/json', response.content_type)
self.assertEqual(http_client.OK, response.status_code)
self.assertEqual(address, response.json['address'])
self.assertTrue(mock_upd.called)
kargs = mock_upd.call_args[0][1]
self.assertEqual(address, kargs.address)
def test_add_root_non_existent(self, mock_upd):
response = self.patch_json('/portgroups/%s' % self.portgroup.uuid,
[{'path': '/foo',
'value': 'bar',
'op': 'add'}],
expect_errors=True,
headers=self.headers)
self.assertEqual('application/json', response.content_type)
self.assertEqual(http_client.BAD_REQUEST, response.status_int)
self.assertTrue(response.json['error_message'])
self.assertFalse(mock_upd.called)
def test_add_multi(self, mock_upd):
extra = {"foo1": "bar1", "foo2": "bar2", "foo3": "bar3"}
patch = []
for k in extra.keys():
patch.append({'path': '/extra/%s' % k,
'value': extra[k],
'op': 'add'})
mock_upd.return_value = self.portgroup
mock_upd.return_value.extra = extra
response = self.patch_json('/portgroups/%s' % self.portgroup.uuid,
patch, headers=self.headers)
self.assertEqual('application/json', response.content_type)
self.assertEqual(http_client.OK, response.status_code)
self.assertEqual(extra, response.json['extra'])
kargs = mock_upd.call_args[0][1]
self.assertEqual(extra, kargs.extra)
def test_remove_uuid(self, mock_upd):
response = self.patch_json('/portgroups/%s' % self.portgroup.uuid,
[{'path': '/uuid',
'op': 'remove'}],
expect_errors=True,
headers=self.headers)
self.assertEqual(http_client.BAD_REQUEST, response.status_int)
self.assertEqual('application/json', response.content_type)
self.assertTrue(response.json['error_message'])
self.assertFalse(mock_upd.called)
def test_update_address_invalid_format(self, mock_upd):
response = self.patch_json('/portgroups/%s' % self.portgroup.uuid,
[{'path': '/address',
'value': 'invalid-format',
'op': 'replace'}],
expect_errors=True,
headers=self.headers)
self.assertEqual('application/json', response.content_type)
self.assertEqual(http_client.BAD_REQUEST, response.status_int)
self.assertTrue(response.json['error_message'])
self.assertFalse(mock_upd.called)
def test_update_portgroup_address_normalized(self, mock_upd):
address = 'AA:BB:CC:DD:EE:FF'
mock_upd.return_value = self.portgroup
mock_upd.return_value.address = address.lower()
response = self.patch_json('/portgroups/%s' % self.portgroup.uuid,
[{'path': '/address',
'value': address,
'op': 'replace'}],
headers=self.headers)
self.assertEqual('application/json', response.content_type)
self.assertEqual(http_client.OK, response.status_code)
self.assertEqual(address.lower(), response.json['address'])
kargs = mock_upd.call_args[0][1]
self.assertEqual(address.lower(), kargs.address)
def test_update_portgroup_standalone_ports_supported(self, mock_upd):
mock_upd.return_value = self.portgroup
mock_upd.return_value.standalone_ports_supported = False
response = self.patch_json('/portgroups/%s' % self.portgroup.uuid,
[{'path': '/standalone_ports_supported',
'value': False,
'op': 'replace'}],
headers=self.headers)
self.assertEqual('application/json', response.content_type)
self.assertEqual(http_client.OK, response.status_code)
self.assertIs(False, response.json['standalone_ports_supported'])
def test_update_portgroup_standalone_ports_supported_bad_api_version(
self, mock_upd):
response = self.patch_json('/portgroups/%s' % self.portgroup.uuid,
[{'path': '/standalone_ports_supported',
'value': False,
'op': 'replace'}],
expect_errors=True,
headers={api_base.Version.string:
str(api_v1.MIN_VER)})
self.assertEqual('application/json', response.content_type)
self.assertEqual(http_client.NOT_FOUND, response.status_int)
self.assertTrue(response.json['error_message'])
self.assertFalse(mock_upd.called)
def test_update_portgroup_internal_info_not_allowed(self, mock_upd):
response = self.patch_json('/portgroups/%s' % self.portgroup.uuid,
[{'path': '/internal_info',
'value': False,
'op': 'replace'}],
expect_errors=True,
headers=self.headers)
self.assertEqual('application/json', response.content_type)
self.assertEqual(http_client.BAD_REQUEST, response.status_int)
self.assertTrue(response.json['error_message'])
self.assertFalse(mock_upd.called)
def test_update_portgroup_mode_properties(self, mock_upd):
mock_upd.return_value = self.portgroup
mock_upd.return_value.mode = '802.3ad'
mock_upd.return_value.properties = {'bond_param': '100'}
response = self.patch_json('/portgroups/%s' % self.portgroup.uuid,
[{'path': '/mode',
'value': '802.3ad',
'op': 'add'},
{'path': '/properties/bond_param',
'value': '100',
'op': 'add'}],
headers=self.headers)
self.assertEqual('application/json', response.content_type)
self.assertEqual(http_client.OK, response.status_code)
self.assertEqual('802.3ad', response.json['mode'])
self.assertEqual({'bond_param': '100'}, response.json['properties'])
def _test_update_portgroup_mode_properties_bad_api_version(self, patch,
mock_upd):
response = self.patch_json('/portgroups/%s' % self.portgroup.uuid,
patch, expect_errors=True,
headers={api_base.Version.string: '1.25'})
self.assertEqual('application/json', response.content_type)
self.assertEqual(http_client.NOT_ACCEPTABLE, response.status_int)
self.assertTrue(response.json['error_message'])
self.assertFalse(mock_upd.called)
def test_update_portgroup_mode_properties_bad_api_version(self, mock_upd):
self._test_update_portgroup_mode_properties_bad_api_version(
[{'path': '/mode', 'op': 'add', 'value': '802.3ad'}], mock_upd)
self._test_update_portgroup_mode_properties_bad_api_version(
[{'path': '/properties/abc', 'op': 'add', 'value': 123}], mock_upd)
def test_remove_mode_not_allowed(self, mock_upd):
response = self.patch_json('/portgroups/%s' % self.portgroup.uuid,
[{'path': '/mode',
'op': 'remove'}],
expect_errors=True,
headers=self.headers)
self.assertEqual(http_client.BAD_REQUEST, response.status_int)
self.assertEqual('application/json', response.content_type)
self.assertTrue(response.json['error_message'])
self.assertFalse(mock_upd.called)
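# Shape of the JSON-Patch documents exercised by TestPatch above: a list of
# RFC 6902 operations sent to PATCH /v1/portgroups/<uuid or name>. The values
# are illustrative; per the *_bad_api_version tests, /mode and /properties
# additionally require API version 1.26 or later.
EXAMPLE_PORTGROUP_PATCH = [
    {'path': '/extra/foo', 'value': 'bar', 'op': 'add'},
    {'path': '/address', 'value': 'aa:bb:cc:dd:ee:ff', 'op': 'replace'},
    {'path': '/extra/foo', 'op': 'remove'},
]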
class TestPost(test_api_base.BaseApiTest):
headers = {api_base.Version.string: str(api_v1.MAX_VER)}
def setUp(self):
super(TestPost, self).setUp()
self.node = obj_utils.create_test_node(self.context)
@mock.patch.object(notification_utils, '_emit_api_notification')
@mock.patch.object(common_utils, 'warn_about_deprecated_extra_vif_port_id',
autospec=True)
@mock.patch.object(timeutils, 'utcnow', autospec=True)
def test_create_portgroup(self, mock_utcnow, mock_warn, mock_notify):
pdict = apiutils.post_get_test_portgroup()
test_time = datetime.datetime(2000, 1, 1, 0, 0)
mock_utcnow.return_value = test_time
response = self.post_json('/portgroups', pdict,
headers=self.headers)
self.assertEqual(http_client.CREATED, response.status_int)
result = self.get_json('/portgroups/%s' % pdict['uuid'],
headers=self.headers)
self.assertEqual(pdict['uuid'], result['uuid'])
self.assertFalse(result['updated_at'])
return_created_at = timeutils.parse_isotime(
result['created_at']).replace(tzinfo=None)
self.assertEqual(test_time, return_created_at)
# Check location header
self.assertIsNotNone(response.location)
expected_location = '/v1/portgroups/%s' % pdict['uuid']
self.assertEqual(urlparse.urlparse(response.location).path,
expected_location)
self.assertEqual(0, mock_warn.call_count)
mock_notify.assert_has_calls([mock.call(mock.ANY, mock.ANY, 'create',
obj_fields.NotificationLevel.INFO,
obj_fields.NotificationStatus.START,
node_uuid=self.node.uuid),
mock.call(mock.ANY, mock.ANY, 'create',
obj_fields.NotificationLevel.INFO,
obj_fields.NotificationStatus.END,
node_uuid=self.node.uuid)])
@mock.patch.object(timeutils, 'utcnow', autospec=True)
def test_create_portgroup_v123(self, mock_utcnow):
pdict = apiutils.post_get_test_portgroup()
test_time = datetime.datetime(2000, 1, 1, 0, 0)
mock_utcnow.return_value = test_time
headers = {api_base.Version.string: "1.23"}
response = self.post_json('/portgroups', pdict,
headers=headers)
self.assertEqual(http_client.CREATED, response.status_int)
result = self.get_json('/portgroups/%s' % pdict['uuid'],
headers=headers)
self.assertEqual(pdict['uuid'], result['uuid'])
self.assertEqual(pdict['node_uuid'], result['node_uuid'])
self.assertFalse(result['updated_at'])
return_created_at = timeutils.parse_isotime(
result['created_at']).replace(tzinfo=None)
self.assertEqual(test_time, return_created_at)
# Check location header
self.assertIsNotNone(response.location)
expected_location = '/v1/portgroups/%s' % pdict['uuid']
self.assertEqual(urlparse.urlparse(response.location).path,
expected_location)
def test_create_portgroup_invalid_api_version(self):
pdict = apiutils.post_get_test_portgroup()
response = self.post_json(
'/portgroups', pdict, headers={api_base.Version.string: '1.14'},
expect_errors=True)
self.assertEqual(http_client.NOT_FOUND, response.status_int)
def test_create_portgroup_doesnt_contain_id(self):
with mock.patch.object(self.dbapi, 'create_portgroup',
wraps=self.dbapi.create_portgroup) as cp_mock:
pdict = apiutils.post_get_test_portgroup(extra={'foo': 123})
self.post_json('/portgroups', pdict, headers=self.headers)
result = self.get_json('/portgroups/%s' % pdict['uuid'],
headers=self.headers)
self.assertEqual(pdict['extra'], result['extra'])
cp_mock.assert_called_once_with(mock.ANY)
# Check that 'id' is not in first arg of positional args
self.assertNotIn('id', cp_mock.call_args[0][0])
@mock.patch.object(notification_utils.LOG, 'exception', autospec=True)
@mock.patch.object(notification_utils.LOG, 'warning', autospec=True)
def test_create_portgroup_generate_uuid(self, mock_warn, mock_except):
pdict = apiutils.post_get_test_portgroup()
del pdict['uuid']
response = self.post_json('/portgroups', pdict, headers=self.headers)
result = self.get_json('/portgroups/%s' % response.json['uuid'],
headers=self.headers)
self.assertEqual(pdict['address'], result['address'])
self.assertTrue(uuidutils.is_uuid_like(result['uuid']))
self.assertFalse(mock_warn.called)
self.assertFalse(mock_except.called)
@mock.patch.object(notification_utils, '_emit_api_notification')
@mock.patch.object(objects.Portgroup, 'create')
def test_create_portgroup_error(self, mock_create, mock_notify):
mock_create.side_effect = Exception()
pdict = apiutils.post_get_test_portgroup()
self.post_json('/portgroups', pdict, headers=self.headers,
expect_errors=True)
mock_notify.assert_has_calls([mock.call(mock.ANY, mock.ANY, 'create',
obj_fields.NotificationLevel.INFO,
obj_fields.NotificationStatus.START,
node_uuid=self.node.uuid),
mock.call(mock.ANY, mock.ANY, 'create',
obj_fields.NotificationLevel.ERROR,
obj_fields.NotificationStatus.ERROR,
node_uuid=self.node.uuid)])
def test_create_portgroup_valid_extra(self):
pdict = apiutils.post_get_test_portgroup(
extra={'str': 'foo', 'int': 123, 'float': 0.1, 'bool': True,
'list': [1, 2], 'none': None, 'dict': {'cat': 'meow'}})
self.post_json('/portgroups', pdict, headers=self.headers)
result = self.get_json('/portgroups/%s' % pdict['uuid'],
headers=self.headers)
self.assertEqual(pdict['extra'], result['extra'])
@mock.patch.object(common_utils, 'warn_about_deprecated_extra_vif_port_id',
autospec=True)
def test_create_portgroup_with_extra_vif_port_id_deprecated(
self, mock_warn):
pgdict = apiutils.post_get_test_portgroup(extra={'vif_port_id': 'foo'})
response = self.post_json('/portgroups', pgdict, headers=self.headers)
self.assertEqual('application/json', response.content_type)
self.assertEqual(http_client.CREATED, response.status_int)
self.assertEqual(1, mock_warn.call_count)
@mock.patch.object(common_utils, 'warn_about_deprecated_extra_vif_port_id',
autospec=True)
def test_create_portgroup_with_no_extra(self, mock_warn):
pgdict = apiutils.post_get_test_portgroup()
del pgdict['extra']
response = self.post_json('/portgroups', pgdict, headers=self.headers)
self.assertEqual('application/json', response.content_type)
self.assertEqual(http_client.CREATED, response.status_int)
self.assertEqual(0, mock_warn.call_count)
def test_create_portgroup_no_address(self):
pdict = apiutils.post_get_test_portgroup()
del pdict['address']
self.post_json('/portgroups', pdict, headers=self.headers)
result = self.get_json('/portgroups/%s' % pdict['uuid'],
headers=self.headers)
self.assertIsNone(result['address'])
def test_create_portgroup_no_mandatory_field_node_uuid(self):
pdict = apiutils.post_get_test_portgroup()
del pdict['node_uuid']
response = self.post_json('/portgroups', pdict, expect_errors=True,
headers=self.headers)
self.assertEqual(http_client.BAD_REQUEST, response.status_int)
self.assertEqual('application/json', response.content_type)
self.assertTrue(response.json['error_message'])
def test_create_portgroup_invalid_addr_format(self):
pdict = apiutils.post_get_test_portgroup(address='invalid-format')
response = self.post_json('/portgroups', pdict, expect_errors=True,
headers=self.headers)
self.assertEqual(http_client.BAD_REQUEST, response.status_int)
self.assertEqual('application/json', response.content_type)
self.assertTrue(response.json['error_message'])
def test_create_portgroup_address_normalized(self):
address = 'AA:BB:CC:DD:EE:FF'
pdict = apiutils.post_get_test_portgroup(address=address)
self.post_json('/portgroups', pdict, headers=self.headers)
result = self.get_json('/portgroups/%s' % pdict['uuid'],
headers=self.headers)
self.assertEqual(address.lower(), result['address'])
def test_create_portgroup_with_hyphens_delimiter(self):
pdict = apiutils.post_get_test_portgroup()
colonsMAC = pdict['address']
hyphensMAC = colonsMAC.replace(':', '-')
pdict['address'] = hyphensMAC
response = self.post_json('/portgroups', pdict, expect_errors=True,
headers=self.headers)
self.assertEqual(http_client.BAD_REQUEST, response.status_int)
self.assertEqual('application/json', response.content_type)
self.assertTrue(response.json['error_message'])
def test_create_portgroup_invalid_node_uuid_format(self):
pdict = apiutils.post_get_test_portgroup(node_uuid='invalid-format')
response = self.post_json('/portgroups', pdict, expect_errors=True,
headers=self.headers)
self.assertEqual('application/json', response.content_type)
self.assertEqual(http_client.BAD_REQUEST, response.status_int)
self.assertTrue(response.json['error_message'])
def test_node_uuid_to_node_id_mapping(self):
pdict = apiutils.post_get_test_portgroup(node_uuid=self.node['uuid'])
self.post_json('/portgroups', pdict, headers=self.headers)
        # GET doesn't return the node_id; it's an internal value
portgroup = self.dbapi.get_portgroup_by_uuid(pdict['uuid'])
self.assertEqual(self.node['id'], portgroup.node_id)
def test_create_portgroup_node_uuid_not_found(self):
pdict = apiutils.post_get_test_portgroup(
node_uuid='1a1a1a1a-2b2b-3c3c-4d4d-5e5e5e5e5e5e')
response = self.post_json('/portgroups', pdict, expect_errors=True,
headers=self.headers)
self.assertEqual('application/json', response.content_type)
self.assertEqual(http_client.BAD_REQUEST, response.status_int)
self.assertTrue(response.json['error_message'])
def test_create_portgroup_address_already_exist(self):
address = 'AA:AA:AA:11:22:33'
pdict = apiutils.post_get_test_portgroup(address=address)
self.post_json('/portgroups', pdict, headers=self.headers)
pdict['uuid'] = uuidutils.generate_uuid()
pdict['name'] = uuidutils.generate_uuid()
response = self.post_json('/portgroups', pdict, expect_errors=True,
headers=self.headers)
self.assertEqual(http_client.CONFLICT, response.status_int)
self.assertEqual('application/json', response.content_type)
error_msg = response.json['error_message']
self.assertTrue(error_msg)
self.assertIn(address, error_msg.upper())
def test_create_portgroup_name_ok(self):
address = 'AA:AA:AA:11:22:33'
name = 'foo'
pdict = apiutils.post_get_test_portgroup(address=address, name=name)
self.post_json('/portgroups', pdict, headers=self.headers)
result = self.get_json('/portgroups/%s' % pdict['uuid'],
headers=self.headers)
self.assertEqual(name, result['name'])
def test_create_portgroup_name_invalid(self):
address = 'AA:AA:AA:11:22:33'
name = 'aa:bb_cc'
pdict = apiutils.post_get_test_portgroup(address=address, name=name)
response = self.post_json('/portgroups', pdict, headers=self.headers,
expect_errors=True)
self.assertEqual(http_client.BAD_REQUEST, response.status_int)
def test_create_portgroup_internal_info_not_allowed(self):
pdict = apiutils.post_get_test_portgroup()
pdict['internal_info'] = 'info'
response = self.post_json('/portgroups', pdict, expect_errors=True,
headers=self.headers)
self.assertEqual(http_client.BAD_REQUEST, response.status_int)
self.assertEqual('application/json', response.content_type)
self.assertTrue(response.json['error_message'])
def test_create_portgroup_mode_old_api_version(self):
for kwarg in [{'mode': '802.3ad'}, {'properties': {'bond_prop': 123}}]:
pdict = apiutils.post_get_test_portgroup(**kwarg)
response = self.post_json(
'/portgroups', pdict, expect_errors=True,
headers={api_base.Version.string: '1.25'})
self.assertEqual(http_client.NOT_ACCEPTABLE, response.status_int)
self.assertEqual('application/json', response.content_type)
self.assertTrue(response.json['error_message'])
def test_create_portgroup_mode_properties(self):
mode = '802.3ad'
props = {'bond_prop': 123}
pdict = apiutils.post_get_test_portgroup(mode=mode, properties=props)
self.post_json('/portgroups', pdict,
headers={api_base.Version.string: '1.26'})
portgroup = self.dbapi.get_portgroup_by_uuid(pdict['uuid'])
self.assertEqual((mode, props), (portgroup.mode, portgroup.properties))
def test_create_portgroup_default_mode(self):
pdict = apiutils.post_get_test_portgroup()
self.post_json('/portgroups', pdict,
headers={api_base.Version.string: '1.26'})
portgroup = self.dbapi.get_portgroup_by_uuid(pdict['uuid'])
self.assertEqual('active-backup', portgroup.mode)
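# Minimal shape of a portgroup create request as exercised by TestPost above.
# The field names come from the tests; the values, including the node UUID,
# are placeholders. mode/properties require API version 1.26 or later.
EXAMPLE_PORTGROUP_POST = {
    'node_uuid': '11111111-2222-3333-4444-555555555555',
    'address': 'aa:bb:cc:dd:ee:ff',
    'name': 'example-portgroup',
    'extra': {'foo': 'bar'},
    'mode': '802.3ad',
    'properties': {'bond_prop': 123},
}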
@mock.patch.object(rpcapi.ConductorAPI, 'destroy_portgroup')
class TestDelete(test_api_base.BaseApiTest):
headers = {api_base.Version.string: str(api_v1.MAX_VER)}
def setUp(self):
super(TestDelete, self).setUp()
self.node = obj_utils.create_test_node(self.context)
self.portgroup = obj_utils.create_test_portgroup(self.context,
node_id=self.node.id)
gtf = mock.patch.object(rpcapi.ConductorAPI, 'get_topic_for')
self.mock_gtf = gtf.start()
self.mock_gtf.return_value = 'test-topic'
self.addCleanup(gtf.stop)
def test_delete_portgroup_byaddress(self, mock_dpt):
response = self.delete('/portgroups/%s' % self.portgroup.address,
expect_errors=True, headers=self.headers)
self.assertEqual(http_client.BAD_REQUEST, response.status_int)
self.assertEqual('application/json', response.content_type)
self.assertIn(self.portgroup.address, response.json['error_message'])
@mock.patch.object(notification_utils, '_emit_api_notification')
def test_delete_portgroup_byid(self, mock_notify, mock_dpt):
self.delete('/portgroups/%s' % self.portgroup.uuid,
headers=self.headers)
self.assertTrue(mock_dpt.called)
mock_notify.assert_has_calls([mock.call(mock.ANY, mock.ANY, 'delete',
obj_fields.NotificationLevel.INFO,
obj_fields.NotificationStatus.START,
node_uuid=self.node.uuid),
mock.call(mock.ANY, mock.ANY, 'delete',
obj_fields.NotificationLevel.INFO,
obj_fields.NotificationStatus.END,
node_uuid=self.node.uuid)])
@mock.patch.object(notification_utils, '_emit_api_notification')
def test_delete_portgroup_node_locked(self, mock_notify, mock_dpt):
self.node.reserve(self.context, 'fake', self.node.uuid)
mock_dpt.side_effect = exception.NodeLocked(node='fake-node',
host='fake-host')
ret = self.delete('/portgroups/%s' % self.portgroup.uuid,
expect_errors=True, headers=self.headers)
self.assertEqual(http_client.CONFLICT, ret.status_code)
self.assertTrue(ret.json['error_message'])
self.assertTrue(mock_dpt.called)
mock_notify.assert_has_calls([mock.call(mock.ANY, mock.ANY, 'delete',
obj_fields.NotificationLevel.INFO,
obj_fields.NotificationStatus.START,
node_uuid=self.node.uuid),
mock.call(mock.ANY, mock.ANY, 'delete',
obj_fields.NotificationLevel.ERROR,
obj_fields.NotificationStatus.ERROR,
node_uuid=self.node.uuid)])
def test_delete_portgroup_invalid_api_version(self, mock_dpt):
response = self.delete('/portgroups/%s' % self.portgroup.uuid,
expect_errors=True,
headers={api_base.Version.string: '1.14'})
self.assertEqual(http_client.NOT_FOUND, response.status_int)
def test_delete_portgroup_byname(self, mock_dpt):
self.delete('/portgroups/%s' % self.portgroup.name,
headers=self.headers)
self.assertTrue(mock_dpt.called)
def test_delete_portgroup_byname_not_existed(self, mock_dpt):
res = self.delete('/portgroups/%s' % 'blah', expect_errors=True,
headers=self.headers)
self.assertEqual(http_client.NOT_FOUND, res.status_code)
| {
"content_hash": "8dd097dbe5eae69d3e54e9a99f66608d",
"timestamp": "",
"source": "github",
"line_count": 1264,
"max_line_length": 79,
"avg_line_length": 50.263449367088604,
"alnum_prop": 0.5612358931578865,
"repo_name": "jiazichenzhan/Server_Manage_Plugin",
"id": "57f0b5284d0959783a9eb3b3d54059e05c9285d2",
"size": "64105",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "ironic-plugin-pike/ironic/tests/unit/api/v1/test_portgroups.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Mako",
"bytes": "349"
},
{
"name": "Python",
"bytes": "5720362"
},
{
"name": "Ruby",
"bytes": "986"
},
{
"name": "Shell",
"bytes": "128352"
}
],
"symlink_target": ""
} |
from Queue import Queue
from threading import Thread
class Worker(Thread):
"""Thread executing tasks from a given tasks queue"""
def __init__(self, tasks):
        super(Worker, self).__init__()  # explicit class avoids the super(self.__class__, ...) recursion trap under subclassing
self.tasks = tasks
self.daemon = True
self.start()
def run(self):
while True:
func, args, kargs = self.tasks.get()
if func is None:
                # a None func is the sentinel telling the worker to exit
self.tasks.task_done()
return
try:
ret = func(*args)
if kargs and 'callback' in kargs:
kargs['callback'](ret)
except Exception as e:
print type(e), e
finally:
self.tasks.task_done()
class ThreadPoolState(object):
IDLE = 1
CLOSED = 2
WAIT_JOIN = 3
class ThreadPoolError(Exception):
pass
class ThreadPool(object):
"""Pool of threads consuming tasks from a queue"""
def __init__(self, workers):
self.tasks = Queue()
self.workers = [Worker(self.tasks) for x in xrange(workers)]
self.state = ThreadPoolState.IDLE
def apply_async(self, func, args, **kargs):
"""Add a task to the queue"""
if self.state != ThreadPoolState.IDLE:
raise ThreadPoolError('ThreadPool cant accept any more tasks')
self.tasks.put((func, args, kargs))
def close(self):
self.state = ThreadPoolState.CLOSED
while not self.tasks.empty():
self.tasks.get_nowait()
self.tasks.task_done()
for worker in self.workers:
self.tasks.put((None, (), {}))
def join(self):
"""Wait for completion of all the tasks in the queue"""
self.state = ThreadPoolState.WAIT_JOIN
self.tasks.join()
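# --- Illustrative usage sketch (editorial addition, not part of the original
# module). Assumes the callback is thread-safe, since it runs on a worker
# thread; list.append is safe enough under CPython's GIL for this demo.
if __name__ == '__main__':
    def square(x):
        return x * x
    results = []
    pool = ThreadPool(4)
    for i in xrange(10):
        pool.apply_async(square, (i,), callback=results.append)
    pool.join()   # block until every queued task has completed
    pool.close()  # push the None sentinels so the workers exit
    print sorted(results)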
| {
"content_hash": "eb858ef753d910b2257aaa3e35bd5a58",
"timestamp": "",
"source": "github",
"line_count": 65,
"max_line_length": 74,
"avg_line_length": 28.16923076923077,
"alnum_prop": 0.5548880393227744,
"repo_name": "goniz/buildscript",
"id": "3c259eca5d2ca3e79597e9cf0e5233f07620bc8b",
"size": "1851",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "build_system/thread_pool.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "606"
},
{
"name": "C++",
"bytes": "130"
},
{
"name": "Python",
"bytes": "18791"
}
],
"symlink_target": ""
} |
import socket
import sys
import requests
import string
from scapy.all import *
r = requests.get("http://4.ipquail.com/ip")
if r.status_code == 200:
ipv4 = r.content.translate(None, string.whitespace)
else:
ipv4 = 'err'
message = "%s" % ipv4
server = '192.168.202.33'
fakesrc = '1.1.1.1'
srcport = RandNum(1024,65535)
mypacket = IP(dst=server, src=ipv4)/UDP(sport=srcport,dport=10000)/message
l2packet = Ether()/mypacket
sendp(l2packet)
srcport = RandNum(1024,65535)
mypacket = IP(dst=server, src=fakesrc)/UDP(sport=srcport,dport=10000)/message
l2packet = Ether()/mypacket
sendp(l2packet)
for x in range(0, 20):
srcport = RandNum(1024,65535)
fakesrc = RandIP()
    mypacket = IP(dst=server, src=fakesrc)/UDP(sport=srcport,dport=10000)/message
l2packet = Ether()/mypacket
sendp(l2packet)
| {
"content_hash": "d997f4215408c9f9d7c120489a057673",
"timestamp": "",
"source": "github",
"line_count": 35,
"max_line_length": 82,
"avg_line_length": 23.542857142857144,
"alnum_prop": 0.7026699029126213,
"repo_name": "tbaschak/bcp38-tests",
"id": "9b4c7a72e22c0832d2bac19b11d08cfd7d2c9429",
"size": "847",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "bcp38-client-spoof.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "3482"
}
],
"symlink_target": ""
} |
import pandas as pd
import numpy as np
import pyaf.ForecastEngine as autof
import pyaf.Bench.TS_datasets as tsds
b1 = tsds.load_airline_passengers()
df = b1.mPastData
df.head()
lEngine = autof.cForecastEngine()
lEngine.mOptions.set_active_decomposition_types(['TSR']);
lEngine
H = b1.mHorizon;
# # lEngine.mOptions.enable_slow_mode();
# lEngine.mOptions.mDebugPerformance = True;
lEngine.mOptions.mParallelMode = True;
lEngine.train(df , b1.mTimeVar , b1.mSignalVar, H);
lEngine.getModelInfo();
print(lEngine.mSignalDecomposition.mTrPerfDetails.head());
lEngine.mSignalDecomposition.mBestModel.mTimeInfo.mResolution
lEngine.standardPlots(name = "outputs/my_airline_passengers_TSR")
dfapp_in = df.copy();
dfapp_in.tail()
#H = 12
dfapp_out = lEngine.forecast(dfapp_in, H);
dfapp_out.tail(2 * H)
print("Forecast Columns " , dfapp_out.columns);
lForecastColumnName = b1.mSignalVar + '_Forecast'
Forecast_DF = dfapp_out[[b1.mTimeVar , b1.mSignalVar, lForecastColumnName , lForecastColumnName + '_Lower_Bound', lForecastColumnName + '_Upper_Bound' ]]
print(Forecast_DF.info())
print("Forecasts\n" , Forecast_DF.tail(2*H));
print("\n\n<ModelInfo>")
print(lEngine.to_json());
print("</ModelInfo>\n\n")
print("\n\n<Forecast>")
print(Forecast_DF.tail(2*H).to_json(date_format='iso'))
print("</Forecast>\n\n")
| {
"content_hash": "25e1a3b6c33c23b9f9c58f86ec157519",
"timestamp": "",
"source": "github",
"line_count": 47,
"max_line_length": 154,
"avg_line_length": 27.914893617021278,
"alnum_prop": 0.743140243902439,
"repo_name": "antoinecarme/pyaf",
"id": "5f9e8f7caa387fa7729393020a4f4f2cb28f0ee9",
"size": "1312",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/multiplicative_seasonal/test_air_passengers_multiplicative_seasonal_TSR.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Makefile",
"bytes": "6773299"
},
{
"name": "Procfile",
"bytes": "24"
},
{
"name": "Python",
"bytes": "54209093"
},
{
"name": "R",
"bytes": "807"
},
{
"name": "Shell",
"bytes": "3619"
}
],
"symlink_target": ""
} |
from django.db import migrations
import uuid
def gen_uuid(apps, schema_editor):
MyModel1 = apps.get_model('tourism', 'touristiccontent')
MyModel2 = apps.get_model('tourism', 'touristicevent')
for row in MyModel1.objects.all():
row.uuid = uuid.uuid4()
row.save(update_fields=['uuid'])
for row in MyModel2.objects.all():
row.uuid = uuid.uuid4()
row.save(update_fields=['uuid'])
class Migration(migrations.Migration):
dependencies = [
('tourism', '0016_auto_20211022_1251'),
]
operations = [
migrations.RunPython(gen_uuid),
]
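    # Editorial note (not in the original migration): RunPython also accepts a
    # reverse callable; migrations.RunPython(gen_uuid, migrations.RunPython.noop)
    # would make this data migration reversible without touching the UUIDs.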
| {
"content_hash": "7607663b92ac9ab8d9383dee62ef69e3",
"timestamp": "",
"source": "github",
"line_count": 24,
"max_line_length": 60,
"avg_line_length": 25.375,
"alnum_prop": 0.6338259441707718,
"repo_name": "makinacorpus/Geotrek",
"id": "a9871df2d21e4604660d5f2d612841586145e58b",
"size": "659",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "geotrek/tourism/migrations/0017_auto_20211022_1255.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "CSS",
"bytes": "30638"
},
{
"name": "HTML",
"bytes": "141008"
},
{
"name": "JavaScript",
"bytes": "184508"
},
{
"name": "Makefile",
"bytes": "4170"
},
{
"name": "PLpgSQL",
"bytes": "85546"
},
{
"name": "Python",
"bytes": "2768434"
},
{
"name": "Shell",
"bytes": "18090"
}
],
"symlink_target": ""
} |
import axdebug, gateways
from util import _wrap, _wrap_remove, RaiseNotImpl
import cStringIO, traceback
from pprint import pprint
from win32com.server.exception import COMException
import winerror
import string
import sys
# Given an object, return a nice string
def MakeNiceString(ob):
stream = cStringIO.StringIO()
pprint(ob, stream)
return string.strip(stream.getvalue())
class ProvideExpressionContexts(gateways.ProvideExpressionContexts):
pass
class ExpressionContext(gateways.DebugExpressionContext):
def __init__(self, frame):
self.frame = frame
def ParseLanguageText(self, code, radix, delim, flags):
return _wrap(Expression(self.frame, code, radix, delim, flags), axdebug.IID_IDebugExpression)
def GetLanguageInfo(self):
# print "GetLanguageInfo"
return "Python", "{DF630910-1C1D-11d0-AE36-8C0F5E000000}"
class Expression(gateways.DebugExpression):
def __init__(self, frame, code, radix, delim, flags):
self.callback = None
self.frame = frame
self.code = code
self.radix = radix
self.delim = delim
self.flags = flags
self.isComplete = 0
self.result=None
self.hresult = winerror.E_UNEXPECTED
def Start(self, callback):
try:
try:
try:
self.result = eval(self.code, self.frame.f_globals, self.frame.f_locals)
except SyntaxError:
exec self.code in self.frame.f_globals, self.frame.f_locals
self.result = ""
self.hresult = 0
except:
l = traceback.format_exception_only(sys.exc_info()[0], sys.exc_info()[1])
# l is a list of strings with trailing "\n"
self.result = string.join(map(lambda s:s[:-1], l), "\n")
self.hresult = winerror.E_FAIL
finally:
self.isComplete = 1
callback.onComplete()
def Abort(self):
print "** ABORT **"
def QueryIsComplete(self):
return self.isComplete
def GetResultAsString(self):
# print "GetStrAsResult returning", self.result
return self.hresult, MakeNiceString(self.result)
def GetResultAsDebugProperty(self):
result = _wrap(DebugProperty(self.code, self.result, None, self.hresult), axdebug.IID_IDebugProperty)
return self.hresult, result
def MakeEnumDebugProperty(object, dwFieldSpec, nRadix, iid, stackFrame = None):
name_vals = []
if hasattr(object, "items") and hasattr(object, "keys"): # If it is a dict.
name_vals = object.iteritems()
dictionary = object
elif hasattr(object, "__dict__"): #object with dictionary, module
name_vals = object.__dict__.iteritems()
dictionary = object.__dict__
infos = []
for name, val in name_vals:
infos.append(GetPropertyInfo(name, val, dwFieldSpec, nRadix, 0, dictionary, stackFrame))
return _wrap(EnumDebugPropertyInfo(infos), axdebug.IID_IEnumDebugPropertyInfo)
def GetPropertyInfo(obname, obvalue, dwFieldSpec, nRadix, hresult=0, dictionary = None, stackFrame = None):
# returns a tuple
name = typ = value = fullname = attrib = dbgprop = None
if dwFieldSpec & axdebug.DBGPROP_INFO_VALUE:
value = MakeNiceString(obvalue)
if dwFieldSpec & axdebug.DBGPROP_INFO_NAME:
name = obname
if dwFieldSpec & axdebug.DBGPROP_INFO_TYPE:
if hresult:
typ = "Error"
else:
try:
typ = type(obvalue).__name__
except AttributeError:
typ = str(type(obvalue))
if dwFieldSpec & axdebug.DBGPROP_INFO_FULLNAME:
fullname = obname
if dwFieldSpec & axdebug.DBGPROP_INFO_ATTRIBUTES:
if hasattr(obvalue, "has_key") or hasattr(obvalue, "__dict__"): # If it is a dict or object
attrib = axdebug.DBGPROP_ATTRIB_VALUE_IS_EXPANDABLE
else:
attrib = 0
if dwFieldSpec & axdebug.DBGPROP_INFO_DEBUGPROP:
dbgprop = _wrap(DebugProperty(name, obvalue, None, hresult, dictionary, stackFrame), axdebug.IID_IDebugProperty)
return name, typ, value, fullname, attrib, dbgprop
from win32com.server.util import ListEnumeratorGateway
class EnumDebugPropertyInfo(ListEnumeratorGateway):
"""A class to expose a Python sequence as an EnumDebugCodeContexts
Create an instance of this class passing a sequence (list, tuple, or
any sequence protocol supporting object) and it will automatically
support the EnumDebugCodeContexts interface for the object.
"""
_public_methods_ = ListEnumeratorGateway._public_methods_ + ["GetCount"]
_com_interfaces_ = [ axdebug.IID_IEnumDebugPropertyInfo]
def GetCount(self):
return len(self._list_)
def _wrap(self, ob):
return ob
class DebugProperty:
_com_interfaces_ = [axdebug.IID_IDebugProperty]
_public_methods_ = ['GetPropertyInfo', 'GetExtendedInfo', 'SetValueAsString',
'EnumMembers', 'GetParent'
]
def __init__(self, name, value, parent = None, hresult = 0, dictionary = None, stackFrame = None):
self.name = name
self.value = value
self.parent = parent
self.hresult = hresult
self.dictionary = dictionary
self.stackFrame = stackFrame
def GetPropertyInfo(self, dwFieldSpec, nRadix):
        return GetPropertyInfo(self.name, self.value, dwFieldSpec, nRadix, self.hresult, self.dictionary, self.stackFrame)
def GetExtendedInfo(self): ### Note - not in the framework.
RaiseNotImpl("DebugProperty::GetExtendedInfo")
def SetValueAsString(self, value, radix):
if self.stackFrame and self.dictionary:
self.dictionary[self.name]= eval(value,self.stackFrame.f_globals, self.stackFrame.f_locals)
else:
RaiseNotImpl("DebugProperty::SetValueAsString")
def EnumMembers(self, dwFieldSpec, nRadix, iid):
# Returns IEnumDebugPropertyInfo
return MakeEnumDebugProperty(self.value, dwFieldSpec, nRadix, iid, self.stackFrame)
def GetParent(self):
# return IDebugProperty
RaiseNotImpl("DebugProperty::GetParent")
| {
"content_hash": "87155d50dd5245f8415a6c01e3e491f8",
"timestamp": "",
"source": "github",
"line_count": 156,
"max_line_length": 120,
"avg_line_length": 39.794871794871796,
"alnum_prop": 0.6531894329896907,
"repo_name": "nzavagli/UnrealPy",
"id": "6682ebfe5672b5c9aaf498bdf3d4a74dcd661346",
"size": "6208",
"binary": false,
"copies": "18",
"ref": "refs/heads/master",
"path": "UnrealPyEmbed/Development/Python/2015.08.07-Python2710-x64-Source-vs2015/Python27/Source/pywin32-219/com/win32comext/axdebug/expressions.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "APL",
"bytes": "587"
},
{
"name": "ASP",
"bytes": "2753"
},
{
"name": "ActionScript",
"bytes": "5686"
},
{
"name": "Ada",
"bytes": "94225"
},
{
"name": "Agda",
"bytes": "3154"
},
{
"name": "Alloy",
"bytes": "6579"
},
{
"name": "ApacheConf",
"bytes": "12482"
},
{
"name": "AppleScript",
"bytes": "421"
},
{
"name": "Assembly",
"bytes": "1093261"
},
{
"name": "AutoHotkey",
"bytes": "3733"
},
{
"name": "AutoIt",
"bytes": "667"
},
{
"name": "Awk",
"bytes": "63276"
},
{
"name": "Batchfile",
"bytes": "147828"
},
{
"name": "BlitzBasic",
"bytes": "185102"
},
{
"name": "BlitzMax",
"bytes": "2387"
},
{
"name": "Boo",
"bytes": "1111"
},
{
"name": "Bro",
"bytes": "7337"
},
{
"name": "C",
"bytes": "108397183"
},
{
"name": "C#",
"bytes": "156749"
},
{
"name": "C++",
"bytes": "13535833"
},
{
"name": "CLIPS",
"bytes": "6933"
},
{
"name": "CMake",
"bytes": "12441"
},
{
"name": "COBOL",
"bytes": "114812"
},
{
"name": "CSS",
"bytes": "430375"
},
{
"name": "Ceylon",
"bytes": "1387"
},
{
"name": "Chapel",
"bytes": "4366"
},
{
"name": "Cirru",
"bytes": "2574"
},
{
"name": "Clean",
"bytes": "9679"
},
{
"name": "Clojure",
"bytes": "23871"
},
{
"name": "CoffeeScript",
"bytes": "20149"
},
{
"name": "ColdFusion",
"bytes": "9006"
},
{
"name": "Common Lisp",
"bytes": "49017"
},
{
"name": "Coq",
"bytes": "66"
},
{
"name": "Cucumber",
"bytes": "390"
},
{
"name": "Cuda",
"bytes": "776"
},
{
"name": "D",
"bytes": "7556"
},
{
"name": "DIGITAL Command Language",
"bytes": "425938"
},
{
"name": "DTrace",
"bytes": "6706"
},
{
"name": "Dart",
"bytes": "591"
},
{
"name": "Dylan",
"bytes": "6343"
},
{
"name": "Ecl",
"bytes": "2599"
},
{
"name": "Eiffel",
"bytes": "2145"
},
{
"name": "Elixir",
"bytes": "4340"
},
{
"name": "Emacs Lisp",
"bytes": "18303"
},
{
"name": "Erlang",
"bytes": "5746"
},
{
"name": "F#",
"bytes": "19156"
},
{
"name": "FORTRAN",
"bytes": "38458"
},
{
"name": "Factor",
"bytes": "10194"
},
{
"name": "Fancy",
"bytes": "2581"
},
{
"name": "Fantom",
"bytes": "25331"
},
{
"name": "GAP",
"bytes": "29880"
},
{
"name": "GLSL",
"bytes": "450"
},
{
"name": "Gnuplot",
"bytes": "11501"
},
{
"name": "Go",
"bytes": "5444"
},
{
"name": "Golo",
"bytes": "1649"
},
{
"name": "Gosu",
"bytes": "2853"
},
{
"name": "Groff",
"bytes": "3458639"
},
{
"name": "Groovy",
"bytes": "2586"
},
{
"name": "HTML",
"bytes": "92126540"
},
{
"name": "Haskell",
"bytes": "49593"
},
{
"name": "Haxe",
"bytes": "16812"
},
{
"name": "Hy",
"bytes": "7237"
},
{
"name": "IDL",
"bytes": "2098"
},
{
"name": "Idris",
"bytes": "2771"
},
{
"name": "Inform 7",
"bytes": "1944"
},
{
"name": "Inno Setup",
"bytes": "18796"
},
{
"name": "Ioke",
"bytes": "469"
},
{
"name": "Isabelle",
"bytes": "21392"
},
{
"name": "Jasmin",
"bytes": "9428"
},
{
"name": "Java",
"bytes": "4040623"
},
{
"name": "JavaScript",
"bytes": "223927"
},
{
"name": "Julia",
"bytes": "27687"
},
{
"name": "KiCad",
"bytes": "475"
},
{
"name": "Kotlin",
"bytes": "971"
},
{
"name": "LSL",
"bytes": "160"
},
{
"name": "Lasso",
"bytes": "18650"
},
{
"name": "Lean",
"bytes": "6921"
},
{
"name": "Limbo",
"bytes": "9891"
},
{
"name": "Liquid",
"bytes": "862"
},
{
"name": "LiveScript",
"bytes": "972"
},
{
"name": "Logos",
"bytes": "19509"
},
{
"name": "Logtalk",
"bytes": "7260"
},
{
"name": "Lua",
"bytes": "8677"
},
{
"name": "Makefile",
"bytes": "2053844"
},
{
"name": "Mask",
"bytes": "815"
},
{
"name": "Mathematica",
"bytes": "191"
},
{
"name": "Max",
"bytes": "296"
},
{
"name": "Modelica",
"bytes": "6213"
},
{
"name": "Modula-2",
"bytes": "23838"
},
{
"name": "Module Management System",
"bytes": "14798"
},
{
"name": "Monkey",
"bytes": "2587"
},
{
"name": "Moocode",
"bytes": "3343"
},
{
"name": "MoonScript",
"bytes": "14862"
},
{
"name": "Myghty",
"bytes": "3939"
},
{
"name": "NSIS",
"bytes": "7663"
},
{
"name": "Nemerle",
"bytes": "1517"
},
{
"name": "NewLisp",
"bytes": "42726"
},
{
"name": "Nimrod",
"bytes": "37191"
},
{
"name": "Nit",
"bytes": "55581"
},
{
"name": "Nix",
"bytes": "2448"
},
{
"name": "OCaml",
"bytes": "42416"
},
{
"name": "Objective-C",
"bytes": "104883"
},
{
"name": "Objective-J",
"bytes": "15340"
},
{
"name": "Opa",
"bytes": "172"
},
{
"name": "OpenEdge ABL",
"bytes": "49943"
},
{
"name": "PAWN",
"bytes": "6555"
},
{
"name": "PHP",
"bytes": "68611"
},
{
"name": "PLSQL",
"bytes": "45772"
},
{
"name": "Pan",
"bytes": "1241"
},
{
"name": "Pascal",
"bytes": "349743"
},
{
"name": "Perl",
"bytes": "5931502"
},
{
"name": "Perl6",
"bytes": "113623"
},
{
"name": "PigLatin",
"bytes": "6657"
},
{
"name": "Pike",
"bytes": "8479"
},
{
"name": "PostScript",
"bytes": "18216"
},
{
"name": "PowerShell",
"bytes": "14236"
},
{
"name": "Prolog",
"bytes": "43750"
},
{
"name": "Protocol Buffer",
"bytes": "3401"
},
{
"name": "Puppet",
"bytes": "130"
},
{
"name": "Python",
"bytes": "122886156"
},
{
"name": "QML",
"bytes": "3912"
},
{
"name": "R",
"bytes": "49247"
},
{
"name": "Racket",
"bytes": "11341"
},
{
"name": "Rebol",
"bytes": "17708"
},
{
"name": "Red",
"bytes": "10536"
},
{
"name": "Redcode",
"bytes": "830"
},
{
"name": "Ruby",
"bytes": "91403"
},
{
"name": "Rust",
"bytes": "6788"
},
{
"name": "SAS",
"bytes": "15603"
},
{
"name": "SaltStack",
"bytes": "1040"
},
{
"name": "Scala",
"bytes": "730"
},
{
"name": "Scheme",
"bytes": "50346"
},
{
"name": "Scilab",
"bytes": "943"
},
{
"name": "Shell",
"bytes": "2925097"
},
{
"name": "ShellSession",
"bytes": "320"
},
{
"name": "Smali",
"bytes": "832"
},
{
"name": "Smalltalk",
"bytes": "158636"
},
{
"name": "Smarty",
"bytes": "523"
},
{
"name": "SourcePawn",
"bytes": "130"
},
{
"name": "Standard ML",
"bytes": "36869"
},
{
"name": "Swift",
"bytes": "2035"
},
{
"name": "SystemVerilog",
"bytes": "265"
},
{
"name": "Tcl",
"bytes": "6077233"
},
{
"name": "TeX",
"bytes": "487999"
},
{
"name": "Tea",
"bytes": "391"
},
{
"name": "TypeScript",
"bytes": "535"
},
{
"name": "VHDL",
"bytes": "4446"
},
{
"name": "VimL",
"bytes": "32053"
},
{
"name": "Visual Basic",
"bytes": "19441"
},
{
"name": "XQuery",
"bytes": "4289"
},
{
"name": "XS",
"bytes": "178055"
},
{
"name": "XSLT",
"bytes": "1995174"
},
{
"name": "Xtend",
"bytes": "727"
},
{
"name": "Yacc",
"bytes": "25665"
},
{
"name": "Zephir",
"bytes": "485"
},
{
"name": "eC",
"bytes": "31545"
},
{
"name": "mupad",
"bytes": "2442"
},
{
"name": "nesC",
"bytes": "23697"
},
{
"name": "xBase",
"bytes": "3349"
}
],
"symlink_target": ""
} |
import copy
from future.utils import iteritems
from pandaharvester.harvesterconfig import harvester_config
from pandaharvester.harvestercore import core_utils
from pandaharvester.harvestercore.db_proxy_pool import DBProxyPool as DBProxy
from pandaharvester.harvestercore.plugin_factory import PluginFactory
from pandaharvester.harvestermisc.apfmon import Apfmon
# logger
_logger = core_utils.setup_logger('worker_adjuster')
# class to define number of workers to submit
class WorkerAdjuster(object):
# constructor
def __init__(self, queue_config_mapper):
self.queueConfigMapper = queue_config_mapper
self.pluginFactory = PluginFactory()
self.dbProxy = DBProxy()
self.throttlerMap = dict()
self.apf_mon = Apfmon(self.queueConfigMapper)
try:
self.maxNewWorkers = harvester_config.submitter.maxNewWorkers
except AttributeError:
self.maxNewWorkers = None
# define number of workers to submit based on various information
def define_num_workers(self, static_num_workers, site_name):
tmpLog = core_utils.make_logger(_logger, 'site={0}'.format(site_name), method_name='define_num_workers')
tmpLog.debug('start')
tmpLog.debug('static_num_workers: {0}'.format(static_num_workers))
dyn_num_workers = copy.deepcopy(static_num_workers)
try:
# get queue status
queueStat = self.dbProxy.get_cache("panda_queues.json", None)
if queueStat is None:
queueStat = dict()
else:
queueStat = queueStat.data
# get job statistics
job_stats = self.dbProxy.get_cache("job_statistics.json", None)
if job_stats is None:
job_stats = dict()
else:
job_stats = job_stats.data
# define num of new workers
for queueName in static_num_workers:
# get queue
queueConfig = self.queueConfigMapper.get_queue(queueName)
workerLimits_dict = self.dbProxy.get_worker_limits(queueName)
maxWorkers = workerLimits_dict.get('maxWorkers', 0)
nQueueLimit = workerLimits_dict.get('nQueueLimitWorker', 0)
nQueueLimitPerRT = workerLimits_dict['nQueueLimitWorkerPerRT']
nQueue_total, nReady_total, nRunning_total = 0, 0, 0
apf_msg = None
apf_data = None
for resource_type, tmpVal in iteritems(static_num_workers[queueName]):
tmpLog.debug('Processing queue {0} resource {1} with static_num_workers {2}'.
format(queueName, resource_type, tmpVal))
                    # set the number of new workers to 0 when the queue is disabled
if queueName in queueStat and queueStat[queueName]['status'] in ['offline', 'standby',
'maintenance']:
dyn_num_workers[queueName][resource_type]['nNewWorkers'] = 0
retMsg = 'set nNewWorkers=0 since status={0}'.format(queueStat[queueName]['status'])
tmpLog.debug(retMsg)
apf_msg = 'Not submitting workers since queue status = {0}'.format(queueStat[queueName]['status'])
continue
# protection against not-up-to-date queue config
if queueConfig is None:
dyn_num_workers[queueName][resource_type]['nNewWorkers'] = 0
retMsg = 'set nNewWorkers=0 due to missing queueConfig'
tmpLog.debug(retMsg)
apf_msg = 'Not submitting workers because of missing queueConfig'
continue
# get throttler
if queueName not in self.throttlerMap:
if hasattr(queueConfig, 'throttler'):
throttler = self.pluginFactory.get_plugin(queueConfig.throttler)
else:
throttler = None
self.throttlerMap[queueName] = throttler
# check throttler
throttler = self.throttlerMap[queueName]
if throttler is not None:
toThrottle, tmpMsg = throttler.to_be_throttled(queueConfig)
if toThrottle:
dyn_num_workers[queueName][resource_type]['nNewWorkers'] = 0
retMsg = 'set nNewWorkers=0 by {0}:{1}'.format(throttler.__class__.__name__, tmpMsg)
tmpLog.debug(retMsg)
continue
# check stats
nQueue = tmpVal['nQueue']
nReady = tmpVal['nReady']
nRunning = tmpVal['nRunning']
if resource_type != 'ANY':
nQueue_total += nQueue
nReady_total += nReady
nRunning_total += nRunning
if queueConfig.runMode == 'slave':
nNewWorkersDef = tmpVal['nNewWorkers']
if nNewWorkersDef == 0:
dyn_num_workers[queueName][resource_type]['nNewWorkers'] = 0
retMsg = 'set nNewWorkers=0 by panda in slave mode'
tmpLog.debug(retMsg)
continue
else:
nNewWorkersDef = None
# define num of new workers based on static site config
nNewWorkers = 0
if nQueue >= nQueueLimitPerRT > 0:
# enough queued workers
retMsg = 'No nNewWorkers since nQueue({0})>=nQueueLimitPerRT({1})'.format(nQueue, nQueueLimitPerRT)
tmpLog.debug(retMsg)
pass
elif (nQueue + nReady + nRunning) >= maxWorkers > 0:
# enough workers in the system
retMsg = 'No nNewWorkers since nQueue({0}) + nReady({1}) + nRunning({2}) '.format(nQueue,
nReady,
nRunning)
retMsg += '>= maxWorkers({0})'.format(maxWorkers)
tmpLog.debug(retMsg)
pass
else:
maxQueuedWorkers = None
if nQueueLimitPerRT > 0: # there is a limit set for the queue
maxQueuedWorkers = nQueueLimitPerRT
                        # adjust maxQueuedWorkers for the particular cases below
                        # (centrally imposed slave-mode limits, pull-mode queues)
if nNewWorkersDef is not None: # don't surpass limits given centrally
maxQueuedWorkers_slave = nNewWorkersDef + nQueue
if maxQueuedWorkers is not None:
maxQueuedWorkers = min(maxQueuedWorkers_slave, maxQueuedWorkers)
else:
maxQueuedWorkers = maxQueuedWorkers_slave
elif queueConfig.mapType == 'NoJob': # for pull mode, limit to activated jobs
# limit the queue to the number of activated jobs to avoid empty pilots
try:
n_activated = max(job_stats[queueName]['activated'], 1) # avoid no activity queues
queue_limit = maxQueuedWorkers
maxQueuedWorkers = min(n_activated, maxQueuedWorkers)
tmpLog.debug('limiting maxQueuedWorkers to min(n_activated={0}, queue_limit={1})'.
format(n_activated, queue_limit))
except KeyError:
tmpLog.warning('n_activated not defined, defaulting to configured queue limits')
pass
if maxQueuedWorkers is None: # no value found, use default value
maxQueuedWorkers = 1
# new workers
nNewWorkers = max(maxQueuedWorkers - nQueue, 0)
tmpLog.debug('setting nNewWorkers to {0} in maxQueuedWorkers calculation'
.format(nNewWorkers))
if maxWorkers > 0:
nNewWorkers = min(nNewWorkers, max(maxWorkers - nQueue - nReady - nRunning, 0))
tmpLog.debug('setting nNewWorkers to {0} to respect maxWorkers'
.format(nNewWorkers))
if queueConfig.maxNewWorkersPerCycle > 0:
nNewWorkers = min(nNewWorkers, queueConfig.maxNewWorkersPerCycle)
tmpLog.debug('setting nNewWorkers to {0} in order to respect maxNewWorkersPerCycle'
.format(nNewWorkers))
if self.maxNewWorkers is not None and self.maxNewWorkers > 0:
nNewWorkers = min(nNewWorkers, self.maxNewWorkers)
tmpLog.debug('setting nNewWorkers to {0} in order to respect universal maxNewWorkers'
.format(nNewWorkers))
dyn_num_workers[queueName][resource_type]['nNewWorkers'] = nNewWorkers
# adjust nNewWorkers for UCORE to let aggregations over RT respect nQueueLimitWorker and maxWorkers
if queueConfig is None:
maxNewWorkersPerCycle = 0
retMsg = 'set maxNewWorkersPerCycle=0 in UCORE aggregation due to missing queueConfig'
tmpLog.debug(retMsg)
else:
maxNewWorkersPerCycle = queueConfig.maxNewWorkersPerCycle
if len(dyn_num_workers[queueName]) > 1:
total_new_workers_rts = sum( dyn_num_workers[queueName][_rt]['nNewWorkers']
if _rt != 'ANY' else 0
for _rt in dyn_num_workers[queueName] )
nNewWorkers_max_agg = min(
max(nQueueLimit - nQueue_total, 0),
max(maxWorkers - nQueue_total - nReady_total - nRunning_total, 0),
)
if maxNewWorkersPerCycle >= 0:
nNewWorkers_max_agg = min(nNewWorkers_max_agg, maxNewWorkersPerCycle)
if self.maxNewWorkers is not None and self.maxNewWorkers > 0:
nNewWorkers_max_agg = min(nNewWorkers_max_agg, self.maxNewWorkers)
                    # the aggregate max is exceeded; scale the per-RT numbers down
if total_new_workers_rts > nNewWorkers_max_agg:
if nNewWorkers_max_agg == 0:
for resource_type in dyn_num_workers[queueName]:
dyn_num_workers[queueName][resource_type]['nNewWorkers'] = 0
tmpLog.debug('No nNewWorkers since nNewWorkers_max_agg=0 for UCORE')
else:
tmpLog.debug('nNewWorkers_max_agg={0} for UCORE'.format(nNewWorkers_max_agg))
_d = dyn_num_workers[queueName].copy()
del _d['ANY']
simple_rt_nw_list = [ [_rt, _d[_rt].get('nNewWorkers', 0), 0] for _rt in _d ]
_countdown = nNewWorkers_max_agg
for _rt_list in simple_rt_nw_list:
resource_type, nNewWorkers_orig, _r = _rt_list
nNewWorkers, remainder = divmod(nNewWorkers_orig*nNewWorkers_max_agg, total_new_workers_rts)
dyn_num_workers[queueName][resource_type]['nNewWorkers'] = nNewWorkers
_rt_list[2] = remainder
_countdown -= nNewWorkers
_s_list = sorted(simple_rt_nw_list, key=(lambda x: x[1]))
sorted_rt_nw_list = sorted(_s_list, key=(lambda x: x[2]), reverse=True)
for resource_type, nNewWorkers_orig, remainder in sorted_rt_nw_list:
if _countdown <= 0:
break
dyn_num_workers[queueName][resource_type]['nNewWorkers'] += 1
_countdown -= 1
for resource_type in dyn_num_workers[queueName]:
if resource_type == 'ANY':
continue
nNewWorkers = dyn_num_workers[queueName][resource_type]['nNewWorkers']
tmpLog.debug('setting nNewWorkers to {0} of type {1} in order to respect RT aggregations for UCORE'
.format(nNewWorkers, resource_type))
if not apf_msg:
apf_data = copy.deepcopy(dyn_num_workers[queueName])
self.apf_mon.update_label(queueName, apf_msg, apf_data)
# dump
tmpLog.debug('defined {0}'.format(str(dyn_num_workers)))
return dyn_num_workers
except Exception:
# dump error
errMsg = core_utils.dump_error_message(tmpLog)
return None
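# --- Editorial sketch (not part of harvester): the UCORE re-balancing above is
# essentially a largest-remainder apportionment -- scale each resource type's
# request down to the aggregate cap with divmod, then hand the leftover slots
# to the types with the biggest remainders. A minimal standalone version with
# hypothetical inputs:
def _apportion_new_workers_sketch(requests, cap):
    total = sum(requests.values())
    if total <= cap:
        return dict(requests)
    scaled, remainders = {}, {}
    for rtype, n in requests.items():
        share, rem = divmod(n * cap, total)
        scaled[rtype] = share
        remainders[rtype] = rem
    leftover = cap - sum(scaled.values())
    # hand the spare slots to the largest remainders first
    for rtype in sorted(remainders, key=remainders.get, reverse=True):
        if leftover <= 0:
            break
        scaled[rtype] += 1
        leftover -= 1
    return scaled
# e.g. _apportion_new_workers_sketch({'SCORE': 30, 'MCORE': 10}, 20)
# -> {'SCORE': 15, 'MCORE': 5}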
| {
"content_hash": "588c9d4ef2c53a7c33875669eeee5c5e",
"timestamp": "",
"source": "github",
"line_count": 242,
"max_line_length": 127,
"avg_line_length": 57.51652892561984,
"alnum_prop": 0.4984553488037934,
"repo_name": "dougbenjamin/panda-harvester",
"id": "22fd057e985145af0732d2a74c66d8b403a58622",
"size": "13919",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "pandaharvester/harvesterbody/worker_adjuster.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "1540221"
},
{
"name": "Shell",
"bytes": "21117"
}
],
"symlink_target": ""
} |
from django import forms
from vendor.zebra.forms import StripePaymentForm
from django.utils.safestring import mark_safe
from django.contrib.auth import authenticate
from django.contrib.auth.models import User
from apps.profile.models import change_password, blank_authenticate
from apps.social.models import MSocialProfile
PLANS = [
("newsblur-premium-12", mark_safe("$12 / year <span class='NB-small'>($1/month)</span>")),
("newsblur-premium-24", mark_safe("$24 / year <span class='NB-small'>($2/month)</span>")),
("newsblur-premium-36", mark_safe("$36 / year <span class='NB-small'>($3/month)</span>")),
]
class HorizRadioRenderer(forms.RadioSelect.renderer):
""" this overrides widget method to put radio buttons horizontally
instead of vertically.
"""
def render(self):
"""Outputs radios"""
choices = '\n'.join(['%s\n' % w for w in self])
return mark_safe('<div class="NB-stripe-plan-choice">%s</div>' % choices)
class StripePlusPaymentForm(StripePaymentForm):
def __init__(self, *args, **kwargs):
email = kwargs.pop('email')
plan = kwargs.pop('plan', '')
super(StripePlusPaymentForm, self).__init__(*args, **kwargs)
self.fields['email'].initial = email
if plan:
self.fields['plan'].initial = plan
email = forms.EmailField(widget=forms.TextInput(attrs=dict(maxlength=75)),
label='Email address',
required=False)
plan = forms.ChoiceField(required=False, widget=forms.RadioSelect(renderer=HorizRadioRenderer),
choices=PLANS, label='Plan')
class DeleteAccountForm(forms.Form):
password = forms.CharField(widget=forms.PasswordInput(),
label="Confirm your password",
required=False)
confirm = forms.CharField(label="Type \"Delete\" to confirm",
widget=forms.TextInput(),
required=False)
def __init__(self, *args, **kwargs):
self.user = kwargs.pop('user')
super(DeleteAccountForm, self).__init__(*args, **kwargs)
def clean_password(self):
user_auth = authenticate(username=self.user.username,
password=self.cleaned_data['password'])
if not user_auth:
user_auth = blank_authenticate(username=self.user.username)
if not user_auth:
raise forms.ValidationError('Your password doesn\'t match.')
return self.cleaned_data
def clean_confirm(self):
if self.cleaned_data.get('confirm', "").lower() != "delete":
raise forms.ValidationError('Please type "DELETE" to confirm deletion.')
return self.cleaned_data
class ForgotPasswordForm(forms.Form):
email = forms.CharField(widget=forms.TextInput(),
label="Your email address",
required=False)
def __init__(self, *args, **kwargs):
super(ForgotPasswordForm, self).__init__(*args, **kwargs)
def clean_email(self):
if not self.cleaned_data['email']:
raise forms.ValidationError('Please enter in an email address.')
try:
User.objects.get(email__iexact=self.cleaned_data['email'])
except User.MultipleObjectsReturned:
pass
except User.DoesNotExist:
raise forms.ValidationError('No user has that email address.')
return self.cleaned_data
class ForgotPasswordReturnForm(forms.Form):
password = forms.CharField(widget=forms.PasswordInput(),
label="Your new password",
required=False)
class AccountSettingsForm(forms.Form):
username = forms.RegexField(regex=r'^\w+$',
max_length=30,
widget=forms.TextInput(attrs={'class': 'NB-input'}),
label='username',
required=False,
error_messages={
'invalid': "Your username may only contain letters and numbers."
})
email = forms.EmailField(widget=forms.TextInput(attrs={'maxlength': 75, 'class': 'NB-input'}),
label='email address',
required=True,
error_messages={'required': 'Please enter an email.'})
new_password = forms.CharField(widget=forms.PasswordInput(attrs={'class': 'NB-input'}),
label='password',
required=False)
# error_messages={'required': 'Please enter a password.'})
old_password = forms.CharField(widget=forms.PasswordInput(attrs={'class': 'NB-input'}),
label='password',
required=False)
# error_messages={'required': 'Please enter a password.'})
def __init__(self, user, *args, **kwargs):
self.user = user
super(AccountSettingsForm, self).__init__(*args, **kwargs)
def clean_username(self):
username = self.cleaned_data['username']
return username
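    # Editorial note: this form declares no field named 'password' (only
    # new_password / old_password), so Django never invokes clean_password
    # below; it appears to be dead code -- the real check lives in clean().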
def clean_password(self):
if not self.cleaned_data['password']:
return ""
return self.cleaned_data['password']
def clean_email(self):
return self.cleaned_data['email']
def clean(self):
username = self.cleaned_data.get('username', '')
new_password = self.cleaned_data.get('new_password', '')
old_password = self.cleaned_data.get('old_password', '')
email = self.cleaned_data.get('email', None)
if username and self.user.username != username:
try:
User.objects.get(username__iexact=username)
except User.DoesNotExist:
pass
else:
raise forms.ValidationError("This username is already taken. Try something different.")
if self.user.email != email:
if email and User.objects.filter(email__iexact=email).count():
raise forms.ValidationError("This email is already being used by another account. Try something different.")
if old_password or new_password:
code = change_password(self.user, old_password, new_password, only_check=True)
if code <= 0:
raise forms.ValidationError("Your old password is incorrect.")
return self.cleaned_data
def save(self, profile_callback=None):
username = self.cleaned_data['username']
new_password = self.cleaned_data.get('new_password', None)
old_password = self.cleaned_data.get('old_password', None)
email = self.cleaned_data.get('email', None)
if username and self.user.username != username:
change_password(self.user, self.user.username, username)
self.user.username = username
self.user.save()
social_profile = MSocialProfile.get_user(self.user.pk)
social_profile.username = username
social_profile.save()
if self.user.email != email:
self.user.email = email
self.user.save()
if old_password or new_password:
change_password(self.user, old_password, new_password)
| {
"content_hash": "a737b4333a000f15cd5619b0e34bad5e",
"timestamp": "",
"source": "github",
"line_count": 180,
"max_line_length": 124,
"avg_line_length": 42.36666666666667,
"alnum_prop": 0.565827432467873,
"repo_name": "huihoo/reader",
"id": "545670bd56a795d418b9186df22e82f6a0ffc480",
"size": "7626",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "apps/profile/forms.py",
"mode": "33188",
"license": "mit",
"language": [],
"symlink_target": ""
} |
print('main.py was successfully called')
import os
print('imported os')
print('this dir is', os.path.abspath(os.curdir))
print('contents of this dir', os.listdir('./'))
import sys
print('pythonpath is', sys.path)
import kivy
print('imported kivy')
print('file is', kivy.__file__)
from kivy.app import App
from kivy.lang import Builder
from kivy.properties import StringProperty
from kivy.uix.popup import Popup
from kivy.clock import Clock
print('Imported kivy')
from kivy.utils import platform
print('platform is', platform)
import peewee
import requests
import sqlite3
try:
inclemnet = requests.get('http://inclem.net/')
print('got inclem.net request')
except:
inclemnet = 'failed inclemnet'
try:
kivy = requests.get('https://kivy.org/')
print('got kivy request (https)')
except:
kivy = 'failed kivy'
from peewee import *
db = SqliteDatabase('test.db')
class Person(Model):
name = CharField()
birthday = DateField()
is_relative = BooleanField()
class Meta:
database = db
def __repr__(self):
return '<Person: {}, {}>'.format(self.name, self.birthday)
def __str__(self):
return repr(self)
db.connect()
try:
db.create_tables([Person])
except:
import traceback
traceback.print_exc()
import random
from datetime import date
test_person = Person(name='person{}'.format(random.randint(0, 1000)),
birthday=date(random.randint(1900, 2000), random.randint(1, 9), random.randint(1, 20)),
is_relative=False)
test_person.save()
kv = '''
#:import Metrics kivy.metrics.Metrics
#:import sys sys
<FixedSizeButton@Button>:
size_hint_y: None
height: dp(60)
ScrollView:
GridLayout:
cols: 1
size_hint_y: None
height: self.minimum_height
FixedSizeButton:
text: 'test pyjnius'
on_press: app.test_pyjnius()
Label:
height: self.texture_size[1]
size_hint_y: None
text_size: self.size[0], None
markup: True
text: 'kivy request: {}\\ninclemnet request: {}'.format(app.kivy_request, app.inclemnet_request)
halign: 'center'
Label:
height: self.texture_size[1]
size_hint_y: None
text_size: self.size[0], None
markup: True
text: 'people: {}'.format(app.people)
halign: 'center'
Image:
keep_ratio: False
allow_stretch: True
source: 'colours.png'
size_hint_y: None
height: dp(100)
Label:
height: self.texture_size[1]
size_hint_y: None
font_size: 100
text_size: self.size[0], None
markup: True
text: '[b]Kivy[/b] on [b]SDL2[/b] on [b]Android[/b]!'
halign: 'center'
Label:
height: self.texture_size[1]
size_hint_y: None
text_size: self.size[0], None
markup: True
text: sys.version
halign: 'center'
padding_y: dp(10)
Widget:
size_hint_y: None
height: 20
Label:
height: self.texture_size[1]
size_hint_y: None
font_size: 50
text_size: self.size[0], None
markup: True
text: 'dpi: {}\\ndensity: {}\\nfontscale: {}'.format(Metrics.dpi, Metrics.density, Metrics.fontscale)
halign: 'center'
FixedSizeButton:
text: 'test ctypes'
on_press: app.test_ctypes()
FixedSizeButton:
text: 'test numpy'
on_press: app.test_numpy()
Widget:
size_hint_y: None
height: 1000
on_touch_down: print('touched at', args[-1].pos)
<ErrorPopup>:
title: 'Error'
size_hint: 0.75, 0.75
Label:
text: root.error_text
'''
class ErrorPopup(Popup):
error_text = StringProperty('')
def raise_error(error):
print('ERROR:', error)
ErrorPopup(error_text=error).open()
class TestApp(App):
kivy_request = kivy
inclemnet_request = inclemnet
people = ', '.join(map(str, list(Person.select())))
def build(self):
root = Builder.load_string(kv)
Clock.schedule_interval(self.print_something, 2)
# Clock.schedule_interval(self.test_pyjnius, 5)
print('testing metrics')
from kivy.metrics import Metrics
print('dpi is', Metrics.dpi)
print('density is', Metrics.density)
print('fontscale is', Metrics.fontscale)
return root
def print_something(self, *args):
print('App print tick', Clock.get_boottime())
def on_pause(self):
return True
def test_pyjnius(self, *args):
try:
from jnius import autoclass
except ImportError:
raise_error('Could not import pyjnius')
return
print('Attempting to vibrate with pyjnius')
# PythonActivity = autoclass('org.renpy.android.PythonActivity')
# activity = PythonActivity.mActivity
PythonActivity = autoclass('org.kivy.android.PythonActivity')
activity = PythonActivity.mActivity
Intent = autoclass('android.content.Intent')
Context = autoclass('android.content.Context')
vibrator = activity.getSystemService(Context.VIBRATOR_SERVICE)
vibrator.vibrate(1000)
def test_ctypes(self, *args):
import ctypes
def test_numpy(self, *args):
import numpy
print(numpy.zeros(5))
print(numpy.arange(5))
print(numpy.random.random((3, 3)))
TestApp().run()
| {
"content_hash": "b16244914823591ee7c096fdca51804e",
"timestamp": "",
"source": "github",
"line_count": 222,
"max_line_length": 113,
"avg_line_length": 25.78828828828829,
"alnum_prop": 0.5807860262008734,
"repo_name": "wexi/python-for-android",
"id": "a8083ab961f57e38d81e5a510658a70c17f821c0",
"size": "5725",
"binary": false,
"copies": "2",
"ref": "refs/heads/local",
"path": "testapps/testapp_sqlite_openssl/main.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "70942"
},
{
"name": "C++",
"bytes": "491"
},
{
"name": "CMake",
"bytes": "250"
},
{
"name": "CSS",
"bytes": "3487"
},
{
"name": "HTML",
"bytes": "11631"
},
{
"name": "Java",
"bytes": "511420"
},
{
"name": "Makefile",
"bytes": "27280"
},
{
"name": "Python",
"bytes": "2941232"
},
{
"name": "Shell",
"bytes": "5340"
}
],
"symlink_target": ""
} |
"""
============================
Faces dataset decompositions
============================
This example applies to :ref:`olivetti_faces_dataset` different unsupervised
matrix decomposition (dimension reduction) methods from the module
:py:mod:`sklearn.decomposition` (see the documentation chapter
:ref:`decompositions`) .
"""
print(__doc__)
# Authors: Vlad Niculae, Alexandre Gramfort
# License: BSD 3 clause
import logging
from time import time
from numpy.random import RandomState
import matplotlib.pyplot as plt
from sklearn.datasets import fetch_olivetti_faces
from sklearn.cluster import MiniBatchKMeans
from sklearn import decomposition
# Display progress logs on stdout
logging.basicConfig(level=logging.INFO,
format='%(asctime)s %(levelname)s %(message)s')
n_row, n_col = 2, 3
n_components = n_row * n_col
image_shape = (64, 64)
rng = RandomState(0)
# #############################################################################
# Load faces data
faces, _ = fetch_olivetti_faces(return_X_y=True, shuffle=True,
random_state=rng)
n_samples, n_features = faces.shape
# global centering
faces_centered = faces - faces.mean(axis=0)
# local centering
faces_centered -= faces_centered.mean(axis=1).reshape(n_samples, -1)
print("Dataset consists of %d faces" % n_samples)
def plot_gallery(title, images, n_col=n_col, n_row=n_row, cmap=plt.cm.gray):
plt.figure(figsize=(2. * n_col, 2.26 * n_row))
plt.suptitle(title, size=16)
for i, comp in enumerate(images):
plt.subplot(n_row, n_col, i + 1)
vmax = max(comp.max(), -comp.min())
plt.imshow(comp.reshape(image_shape), cmap=cmap,
interpolation='nearest',
vmin=-vmax, vmax=vmax)
plt.xticks(())
plt.yticks(())
plt.subplots_adjust(0.01, 0.05, 0.99, 0.93, 0.04, 0.)
# #############################################################################
# List of the different estimators, whether to center and transpose the
# problem, and whether the transformer uses the clustering API.
estimators = [
('Eigenfaces - PCA using randomized SVD',
decomposition.PCA(n_components=n_components, svd_solver='randomized',
whiten=True),
True),
('Non-negative components - NMF',
decomposition.NMF(n_components=n_components, init='nndsvda', tol=5e-3),
False),
('Independent components - FastICA',
decomposition.FastICA(n_components=n_components, whiten=True),
True),
('Sparse comp. - MiniBatchSparsePCA',
decomposition.MiniBatchSparsePCA(n_components=n_components, alpha=0.8,
n_iter=100, batch_size=3,
random_state=rng),
True),
('MiniBatchDictionaryLearning',
decomposition.MiniBatchDictionaryLearning(n_components=15, alpha=0.1,
n_iter=50, batch_size=3,
random_state=rng),
True),
('Cluster centers - MiniBatchKMeans',
MiniBatchKMeans(n_clusters=n_components, tol=1e-3, batch_size=20,
max_iter=50, random_state=rng),
True),
('Factor Analysis components - FA',
decomposition.FactorAnalysis(n_components=n_components, max_iter=20),
True),
]
# #############################################################################
# Plot a sample of the input data
plot_gallery("First centered Olivetti faces", faces_centered[:n_components])
# #############################################################################
# Do the estimation and plot it
for name, estimator, center in estimators:
print("Extracting the top %d %s..." % (n_components, name))
t0 = time()
data = faces
if center:
data = faces_centered
estimator.fit(data)
train_time = (time() - t0)
print("done in %0.3fs" % train_time)
if hasattr(estimator, 'cluster_centers_'):
components_ = estimator.cluster_centers_
else:
components_ = estimator.components_
# Plot an image representing the pixelwise variance provided by the
# estimator e.g its noise_variance_ attribute. The Eigenfaces estimator,
# via the PCA decomposition, also provides a scalar noise_variance_
# (the mean of pixelwise variance) that cannot be displayed as an image
# so we skip it.
if (hasattr(estimator, 'noise_variance_') and
estimator.noise_variance_.ndim > 0): # Skip the Eigenfaces case
plot_gallery("Pixelwise variance",
estimator.noise_variance_.reshape(1, -1), n_col=1,
n_row=1)
plot_gallery('%s - Train time %.1fs' % (name, train_time),
components_[:n_components])
plt.show()
# #############################################################################
# Various positivity constraints applied to dictionary learning.
estimators = [
('Dictionary learning',
decomposition.MiniBatchDictionaryLearning(n_components=15, alpha=0.1,
n_iter=50, batch_size=3,
random_state=rng),
True),
('Dictionary learning - positive dictionary',
decomposition.MiniBatchDictionaryLearning(n_components=15, alpha=0.1,
n_iter=50, batch_size=3,
random_state=rng,
positive_dict=True),
True),
('Dictionary learning - positive code',
decomposition.MiniBatchDictionaryLearning(n_components=15, alpha=0.1,
n_iter=50, batch_size=3,
fit_algorithm='cd',
random_state=rng,
positive_code=True),
True),
('Dictionary learning - positive dictionary & code',
decomposition.MiniBatchDictionaryLearning(n_components=15, alpha=0.1,
n_iter=50, batch_size=3,
fit_algorithm='cd',
random_state=rng,
positive_dict=True,
positive_code=True),
True),
]
# #############################################################################
# Plot a sample of the input data
plot_gallery("First centered Olivetti faces", faces_centered[:n_components],
cmap=plt.cm.RdBu)
# #############################################################################
# Do the estimation and plot it
for name, estimator, center in estimators:
print("Extracting the top %d %s..." % (n_components, name))
t0 = time()
data = faces
if center:
data = faces_centered
estimator.fit(data)
train_time = (time() - t0)
print("done in %0.3fs" % train_time)
components_ = estimator.components_
plot_gallery(name, components_[:n_components], cmap=plt.cm.RdBu)
plt.show()
| {
"content_hash": "4b5c30cfdb6ff6dc9c4b5d3f540888d4",
"timestamp": "",
"source": "github",
"line_count": 194,
"max_line_length": 79,
"avg_line_length": 37.376288659793815,
"alnum_prop": 0.532754102882361,
"repo_name": "huzq/scikit-learn",
"id": "84e6f923f0d3bf54915e9d0a67e0d05d39391857",
"size": "7251",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "examples/decomposition/plot_faces_decomposition.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "3366"
},
{
"name": "C",
"bytes": "394787"
},
{
"name": "C++",
"bytes": "140225"
},
{
"name": "Makefile",
"bytes": "1579"
},
{
"name": "PowerShell",
"bytes": "17042"
},
{
"name": "Python",
"bytes": "6394128"
},
{
"name": "Shell",
"bytes": "9250"
}
],
"symlink_target": ""
} |
import logging
from celery import uuid as celery_uuid
from django.conf import settings
from django.conf.urls import url
from django.contrib import admin, messages
from django.contrib.admin.options import IS_POPUP_VAR
from django.contrib.admin.utils import unquote
from django.contrib.auth import update_session_auth_hash
from django.contrib.auth.forms import (
AdminPasswordChangeForm, UserChangeForm, UserCreationForm,
)
from django.core.exceptions import PermissionDenied
from django.core.files.storage import FileSystemStorage
from django.db import transaction
from django.http import Http404, HttpResponseRedirect
from django.template.response import TemplateResponse
from django.utils.decorators import method_decorator
from django.utils.encoding import force_text
from django.utils.html import escape
from django.utils.translation import ugettext, ugettext_lazy as _
from django.views.decorators.csrf import csrf_protect
from django.views.decorators.debug import sensitive_post_parameters
csrf_protect_m = method_decorator(csrf_protect)
sensitive_post_parameters_m = method_decorator(sensitive_post_parameters())
from .models import GenoomyUser
from disease.models import AnalyzeDataOrder
from disease.tasks import recompute_genome_file
from disease.files_utils import get_genome_dirpath, get_genome_filepath, \
process_filename, parse_raw_genome_file, \
process_genoome_data, handle_zipped_genome_file
log = logging.getLogger(__name__)
storage = FileSystemStorage()
class GenoomyUserAdmin(admin.ModelAdmin):
add_form_template = 'admin/auth/user/add_form.html'
change_user_password_template = None
fieldsets = (
(None, {'fields': ('username', 'password')}),
(_('Personal info'), {'fields': ('first_name', 'last_name', 'email')}),
(_('Permissions'), {'fields': ('is_active', 'is_staff', 'is_superuser',
'groups', 'user_permissions')}),
(_('Important dates'), {'fields': ('last_login', 'date_joined')}),
)
add_fieldsets = (
(None, {
'classes': ('wide',),
'fields': ('username', 'password1', 'password2'),
}),
)
form = UserChangeForm
add_form = UserCreationForm
change_password_form = AdminPasswordChangeForm
list_display = ('username', 'email', 'first_name', 'last_name', 'is_staff', 'date_joined', 'last_login')
list_filter = ('is_staff', 'is_superuser', 'is_active', 'groups')
search_fields = ('username', 'first_name', 'last_name', 'email')
ordering = ('username',)
filter_horizontal = ('groups', 'user_permissions',)
actions = ['refresh_user_genome_data']
def refresh_user_genome_data(self, request, queryset):
for user in queryset:
genome_dirpath = get_genome_dirpath(user)
if storage.exists(genome_dirpath):
_, files = storage.listdir(genome_dirpath)
for file in files:
filename, ext = file.rsplit('.', 1)
if filename.endswith('_processed'):
continue
log.debug('Processing file: %s', file)
genome_filepath = get_genome_filepath(user, file)
try:
analyze_data_order = AnalyzeDataOrder.objects.get(uploaded_filename=file, user=user)
except AnalyzeDataOrder.DoesNotExist:
log.debug('AnalyzeDataOrder not found. File %s, user %s', file, user)
continue
analyze_data_order.task_uuid = celery_uuid()
analyze_data_order.save()
recompute_genome_file.apply_async(args=(genome_filepath,),
task_id=analyze_data_order.task_uuid)
self.message_user(request, 'Successfully added recomputation tasks for %s users' % len(queryset), level='SUCCESS')
    refresh_user_genome_data.short_description = 'Schedule recomputation of user genome data'
def get_fieldsets(self, request, obj=None):
if not obj:
return self.add_fieldsets
return super(GenoomyUserAdmin, self).get_fieldsets(request, obj)
def get_form(self, request, obj=None, **kwargs):
"""
Use special form during user creation
"""
defaults = {}
if obj is None:
defaults['form'] = self.add_form
defaults.update(kwargs)
return super(GenoomyUserAdmin, self).get_form(request, obj, **defaults)
def get_urls(self):
return [
url(r'^(.+)/password/$', self.admin_site.admin_view(self.user_change_password), name='auth_user_password_change'),
url(r'^(.+)/uploaded-files/$', self.admin_site.admin_view(self.user_uploaded_files), name='user_uploaded_files'),
] + super(GenoomyUserAdmin, self).get_urls()
def lookup_allowed(self, lookup, value):
# See #20078: we don't want to allow any lookups involving passwords.
if lookup.startswith('password'):
return False
return super(GenoomyUserAdmin, self).lookup_allowed(lookup, value)
@sensitive_post_parameters_m
@csrf_protect_m
@transaction.atomic
def add_view(self, request, form_url='', extra_context=None):
# It's an error for a user to have add permission but NOT change
# permission for users. If we allowed such users to add users, they
# could create superusers, which would mean they would essentially have
# the permission to change users. To avoid the problem entirely, we
# disallow users from adding users if they don't have change
# permission.
if not self.has_change_permission(request):
if self.has_add_permission(request) and settings.DEBUG:
# Raise Http404 in debug mode so that the user gets a helpful
# error message.
raise Http404(
'Your user does not have the "Change user" permission. In '
'order to add users, Django requires that your user '
'account have both the "Add user" and "Change user" '
'permissions set.')
raise PermissionDenied
if extra_context is None:
extra_context = {}
username_field = self.model._meta.get_field(self.model.USERNAME_FIELD)
defaults = {
'auto_populated_fields': (),
'username_help_text': username_field.help_text,
}
extra_context.update(defaults)
return super(GenoomyUserAdmin, self).add_view(request, form_url,
extra_context)
@sensitive_post_parameters_m
def user_change_password(self, request, id, form_url=''):
if not self.has_change_permission(request):
raise PermissionDenied
user = self.get_object(request, unquote(id))
if user is None:
raise Http404(_('%(name)s object with primary key %(key)r does not exist.') % {
'name': force_text(self.model._meta.verbose_name),
'key': escape(id),
})
if request.method == 'POST':
form = self.change_password_form(user, request.POST)
if form.is_valid():
form.save()
change_message = self.construct_change_message(request, form, None)
self.log_change(request, user, change_message)
msg = ugettext('Password changed successfully.')
messages.success(request, msg)
update_session_auth_hash(request, form.user)
return HttpResponseRedirect('..')
else:
form = self.change_password_form(user)
fieldsets = [(None, {'fields': list(form.base_fields)})]
adminForm = admin.helpers.AdminForm(form, fieldsets, {})
context = {
'title': _('Change password: %s') % escape(user.get_username()),
'adminForm': adminForm,
'form_url': form_url,
'form': form,
'is_popup': (IS_POPUP_VAR in request.POST or
IS_POPUP_VAR in request.GET),
'add': True,
'change': False,
'has_delete_permission': False,
'has_change_permission': True,
'has_absolute_url': False,
'opts': self.model._meta,
'original': user,
'save_as': False,
'show_save': True,
}
context.update(admin.site.each_context(request))
request.current_app = self.admin_site.name
return TemplateResponse(request,
self.change_user_password_template or
'admin/auth/user/change_password.html',
context)
def response_add(self, request, obj, post_url_continue=None):
"""
Determines the HttpResponse for the add_view stage. It mostly defers to
its superclass implementation but is customized because the User model
has a slightly different workflow.
"""
# We should allow further modification of the user just added i.e. the
# 'Save' button should behave like the 'Save and continue editing'
# button except in two scenarios:
# * The user has pressed the 'Save and add another' button
# * We are adding a user in a popup
if '_addanother' not in request.POST and IS_POPUP_VAR not in request.POST:
request.POST['_continue'] = 1
return super(GenoomyUserAdmin, self).response_add(request, obj,
post_url_continue)
def user_uploaded_files(self, request, id):
user = self.get_object(request, unquote(id))
if user is None:
raise Http404(_('%(name)s object with primary key %(key)r does not exist.') % {
'name': force_text(self.model._meta.verbose_name),
'key': escape(id),
})
ctx = {'is_admin': True, 'user_pk': user.pk}
ctx['saved_genome_data'] = user.uploaded_files
return TemplateResponse(request, 'user_profile.html', ctx)
admin.site.register(GenoomyUser, GenoomyUserAdmin)
| {
"content_hash": "6b7cfe18fc8aba2ae01ddb7126990246",
"timestamp": "",
"source": "github",
"line_count": 226,
"max_line_length": 126,
"avg_line_length": 45.2787610619469,
"alnum_prop": 0.6105736343203362,
"repo_name": "jiivan/genoomy",
"id": "db9f27448f690436b2ae6e4cd37ac47bebe7e4ed",
"size": "10233",
"binary": false,
"copies": "1",
"ref": "refs/heads/dev_deploy",
"path": "genoome/accounts/admin.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "51082"
},
{
"name": "HTML",
"bytes": "47982"
},
{
"name": "JavaScript",
"bytes": "31061"
},
{
"name": "Python",
"bytes": "138292"
},
{
"name": "Shell",
"bytes": "5962"
}
],
"symlink_target": ""
} |
import pbr.version
__version__ = pbr.version.VersionInfo(
    'zm').version_string()
| {
"content_hash": "6fdcbccfb582d8bf6796586dd5c6a610",
"timestamp": "",
"source": "github",
"line_count": 5,
"max_line_length": 38,
"avg_line_length": 17.2,
"alnum_prop": 0.686046511627907,
"repo_name": "orviz/zabbix-cli-ent",
"id": "f8325059a7331dacd512c069840e0c88e224ccf2",
"size": "657",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "zm/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "25426"
}
],
"symlink_target": ""
} |
"""Unit tests for the with statement specified in PEP 343."""
__author__ = "Mike Bland"
__email__ = "mbland at acm dot org"
import sys
import unittest
from collections import deque
from contextlib import GeneratorContextManager, contextmanager
from test.support import run_unittest
class MockContextManager(GeneratorContextManager):
def __init__(self, gen):
GeneratorContextManager.__init__(self, gen)
self.enter_called = False
self.exit_called = False
self.exit_args = None
def __enter__(self):
self.enter_called = True
return GeneratorContextManager.__enter__(self)
def __exit__(self, type, value, traceback):
self.exit_called = True
self.exit_args = (type, value, traceback)
return GeneratorContextManager.__exit__(self, type,
value, traceback)
def mock_contextmanager(func):
def helper(*args, **kwds):
return MockContextManager(func(*args, **kwds))
return helper
class MockResource(object):
def __init__(self):
self.yielded = False
self.stopped = False
@mock_contextmanager
def mock_contextmanager_generator():
mock = MockResource()
try:
mock.yielded = True
yield mock
finally:
mock.stopped = True
class Nested(object):
def __init__(self, *managers):
self.managers = managers
self.entered = None
def __enter__(self):
if self.entered is not None:
raise RuntimeError("Context is not reentrant")
self.entered = deque()
vars = []
try:
for mgr in self.managers:
vars.append(mgr.__enter__())
self.entered.appendleft(mgr)
except:
if not self.__exit__(*sys.exc_info()):
raise
return vars
def __exit__(self, *exc_info):
# Behave like nested with statements
# first in, last out
# New exceptions override old ones
ex = exc_info
for mgr in self.entered:
try:
if mgr.__exit__(*ex):
ex = (None, None, None)
except:
ex = sys.exc_info()
self.entered = None
if ex is not exc_info:
raise ex[0](ex[1]).with_traceback(ex[2])
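# Illustrative sketch (not part of the original test file; the helper name is
# hypothetical): Nested() above collapses several context managers into one,
# so for these mocks the two blocks below behave the same way.
def _nested_equivalence_sketch():
    with Nested(mock_contextmanager_generator(),
                mock_contextmanager_generator()) as (a, b):
        pass
    # ...is roughly the multi-item form exercised by NestedWith further down:
    with mock_contextmanager_generator() as c, \
            mock_contextmanager_generator() as d:
        pass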
class MockNested(Nested):
def __init__(self, *managers):
Nested.__init__(self, *managers)
self.enter_called = False
self.exit_called = False
self.exit_args = None
def __enter__(self):
self.enter_called = True
return Nested.__enter__(self)
def __exit__(self, *exc_info):
self.exit_called = True
self.exit_args = exc_info
return Nested.__exit__(self, *exc_info)
class FailureTestCase(unittest.TestCase):
def testNameError(self):
def fooNotDeclared():
with foo: pass
self.assertRaises(NameError, fooNotDeclared)
def testEnterAttributeError(self):
class LacksEnter(object):
def __exit__(self, type, value, traceback):
pass
def fooLacksEnter():
foo = LacksEnter()
with foo: pass
self.assertRaises(AttributeError, fooLacksEnter)
def testExitAttributeError(self):
class LacksExit(object):
def __enter__(self):
pass
def fooLacksExit():
foo = LacksExit()
with foo: pass
self.assertRaises(AttributeError, fooLacksExit)
def assertRaisesSyntaxError(self, codestr):
def shouldRaiseSyntaxError(s):
compile(s, '', 'single')
self.assertRaises(SyntaxError, shouldRaiseSyntaxError, codestr)
def testAssignmentToNoneError(self):
self.assertRaisesSyntaxError('with mock as None:\n pass')
self.assertRaisesSyntaxError(
'with mock as (None):\n'
' pass')
def testAssignmentToEmptyTupleError(self):
self.assertRaisesSyntaxError(
'with mock as ():\n'
' pass')
def testAssignmentToTupleOnlyContainingNoneError(self):
self.assertRaisesSyntaxError('with mock as None,:\n pass')
self.assertRaisesSyntaxError(
'with mock as (None,):\n'
' pass')
def testAssignmentToTupleContainingNoneError(self):
self.assertRaisesSyntaxError(
'with mock as (foo, None, bar):\n'
' pass')
def testEnterThrows(self):
class EnterThrows(object):
def __enter__(self):
raise RuntimeError("Enter threw")
def __exit__(self, *args):
pass
def shouldThrow():
ct = EnterThrows()
self.foo = None
with ct as self.foo:
pass
self.assertRaises(RuntimeError, shouldThrow)
self.assertEqual(self.foo, None)
def testExitThrows(self):
class ExitThrows(object):
def __enter__(self):
return
def __exit__(self, *args):
raise RuntimeError(42)
def shouldThrow():
with ExitThrows():
pass
self.assertRaises(RuntimeError, shouldThrow)
class ContextmanagerAssertionMixin(object):
def setUp(self):
self.TEST_EXCEPTION = RuntimeError("test exception")
def assertInWithManagerInvariants(self, mock_manager):
self.assertTrue(mock_manager.enter_called)
self.assertFalse(mock_manager.exit_called)
self.assertEqual(mock_manager.exit_args, None)
def assertAfterWithManagerInvariants(self, mock_manager, exit_args):
self.assertTrue(mock_manager.enter_called)
self.assertTrue(mock_manager.exit_called)
self.assertEqual(mock_manager.exit_args, exit_args)
def assertAfterWithManagerInvariantsNoError(self, mock_manager):
self.assertAfterWithManagerInvariants(mock_manager,
(None, None, None))
def assertInWithGeneratorInvariants(self, mock_generator):
self.assertTrue(mock_generator.yielded)
self.assertFalse(mock_generator.stopped)
def assertAfterWithGeneratorInvariantsNoError(self, mock_generator):
self.assertTrue(mock_generator.yielded)
self.assertTrue(mock_generator.stopped)
def raiseTestException(self):
raise self.TEST_EXCEPTION
def assertAfterWithManagerInvariantsWithError(self, mock_manager):
self.assertTrue(mock_manager.enter_called)
self.assertTrue(mock_manager.exit_called)
self.assertEqual(mock_manager.exit_args[0], RuntimeError)
self.assertEqual(mock_manager.exit_args[1], self.TEST_EXCEPTION)
def assertAfterWithGeneratorInvariantsWithError(self, mock_generator):
self.assertTrue(mock_generator.yielded)
self.assertTrue(mock_generator.stopped)
class NonexceptionalTestCase(unittest.TestCase, ContextmanagerAssertionMixin):
def testInlineGeneratorSyntax(self):
with mock_contextmanager_generator():
pass
def testUnboundGenerator(self):
mock = mock_contextmanager_generator()
with mock:
pass
self.assertAfterWithManagerInvariantsNoError(mock)
def testInlineGeneratorBoundSyntax(self):
with mock_contextmanager_generator() as foo:
self.assertInWithGeneratorInvariants(foo)
# FIXME: In the future, we'll try to keep the bound names from leaking
self.assertAfterWithGeneratorInvariantsNoError(foo)
def testInlineGeneratorBoundToExistingVariable(self):
foo = None
with mock_contextmanager_generator() as foo:
self.assertInWithGeneratorInvariants(foo)
self.assertAfterWithGeneratorInvariantsNoError(foo)
def testInlineGeneratorBoundToDottedVariable(self):
with mock_contextmanager_generator() as self.foo:
self.assertInWithGeneratorInvariants(self.foo)
self.assertAfterWithGeneratorInvariantsNoError(self.foo)
def testBoundGenerator(self):
mock = mock_contextmanager_generator()
with mock as foo:
self.assertInWithGeneratorInvariants(foo)
self.assertInWithManagerInvariants(mock)
self.assertAfterWithGeneratorInvariantsNoError(foo)
self.assertAfterWithManagerInvariantsNoError(mock)
def testNestedSingleStatements(self):
mock_a = mock_contextmanager_generator()
with mock_a as foo:
mock_b = mock_contextmanager_generator()
with mock_b as bar:
self.assertInWithManagerInvariants(mock_a)
self.assertInWithManagerInvariants(mock_b)
self.assertInWithGeneratorInvariants(foo)
self.assertInWithGeneratorInvariants(bar)
self.assertAfterWithManagerInvariantsNoError(mock_b)
self.assertAfterWithGeneratorInvariantsNoError(bar)
self.assertInWithManagerInvariants(mock_a)
self.assertInWithGeneratorInvariants(foo)
self.assertAfterWithManagerInvariantsNoError(mock_a)
self.assertAfterWithGeneratorInvariantsNoError(foo)
class NestedNonexceptionalTestCase(unittest.TestCase,
ContextmanagerAssertionMixin):
def testSingleArgInlineGeneratorSyntax(self):
with Nested(mock_contextmanager_generator()):
pass
def testSingleArgUnbound(self):
mock_contextmanager = mock_contextmanager_generator()
mock_nested = MockNested(mock_contextmanager)
with mock_nested:
self.assertInWithManagerInvariants(mock_contextmanager)
self.assertInWithManagerInvariants(mock_nested)
self.assertAfterWithManagerInvariantsNoError(mock_contextmanager)
self.assertAfterWithManagerInvariantsNoError(mock_nested)
def testSingleArgBoundToNonTuple(self):
m = mock_contextmanager_generator()
# This will bind all the arguments to nested() into a single list
# assigned to foo.
with Nested(m) as foo:
self.assertInWithManagerInvariants(m)
self.assertAfterWithManagerInvariantsNoError(m)
def testSingleArgBoundToSingleElementParenthesizedList(self):
m = mock_contextmanager_generator()
# This will bind all the arguments to nested() into a single list
# assigned to foo.
with Nested(m) as (foo):
self.assertInWithManagerInvariants(m)
self.assertAfterWithManagerInvariantsNoError(m)
def testSingleArgBoundToMultipleElementTupleError(self):
def shouldThrowValueError():
with Nested(mock_contextmanager_generator()) as (foo, bar):
pass
self.assertRaises(ValueError, shouldThrowValueError)
def testSingleArgUnbound(self):
mock_contextmanager = mock_contextmanager_generator()
mock_nested = MockNested(mock_contextmanager)
with mock_nested:
self.assertInWithManagerInvariants(mock_contextmanager)
self.assertInWithManagerInvariants(mock_nested)
self.assertAfterWithManagerInvariantsNoError(mock_contextmanager)
self.assertAfterWithManagerInvariantsNoError(mock_nested)
def testMultipleArgUnbound(self):
m = mock_contextmanager_generator()
n = mock_contextmanager_generator()
o = mock_contextmanager_generator()
mock_nested = MockNested(m, n, o)
with mock_nested:
self.assertInWithManagerInvariants(m)
self.assertInWithManagerInvariants(n)
self.assertInWithManagerInvariants(o)
self.assertInWithManagerInvariants(mock_nested)
self.assertAfterWithManagerInvariantsNoError(m)
self.assertAfterWithManagerInvariantsNoError(n)
self.assertAfterWithManagerInvariantsNoError(o)
self.assertAfterWithManagerInvariantsNoError(mock_nested)
def testMultipleArgBound(self):
mock_nested = MockNested(mock_contextmanager_generator(),
mock_contextmanager_generator(), mock_contextmanager_generator())
with mock_nested as (m, n, o):
self.assertInWithGeneratorInvariants(m)
self.assertInWithGeneratorInvariants(n)
self.assertInWithGeneratorInvariants(o)
self.assertInWithManagerInvariants(mock_nested)
self.assertAfterWithGeneratorInvariantsNoError(m)
self.assertAfterWithGeneratorInvariantsNoError(n)
self.assertAfterWithGeneratorInvariantsNoError(o)
self.assertAfterWithManagerInvariantsNoError(mock_nested)
class ExceptionalTestCase(ContextmanagerAssertionMixin, unittest.TestCase):
def testSingleResource(self):
cm = mock_contextmanager_generator()
def shouldThrow():
with cm as self.resource:
self.assertInWithManagerInvariants(cm)
self.assertInWithGeneratorInvariants(self.resource)
self.raiseTestException()
self.assertRaises(RuntimeError, shouldThrow)
self.assertAfterWithManagerInvariantsWithError(cm)
self.assertAfterWithGeneratorInvariantsWithError(self.resource)
def testNestedSingleStatements(self):
mock_a = mock_contextmanager_generator()
mock_b = mock_contextmanager_generator()
def shouldThrow():
with mock_a as self.foo:
with mock_b as self.bar:
self.assertInWithManagerInvariants(mock_a)
self.assertInWithManagerInvariants(mock_b)
self.assertInWithGeneratorInvariants(self.foo)
self.assertInWithGeneratorInvariants(self.bar)
self.raiseTestException()
self.assertRaises(RuntimeError, shouldThrow)
self.assertAfterWithManagerInvariantsWithError(mock_a)
self.assertAfterWithManagerInvariantsWithError(mock_b)
self.assertAfterWithGeneratorInvariantsWithError(self.foo)
self.assertAfterWithGeneratorInvariantsWithError(self.bar)
def testMultipleResourcesInSingleStatement(self):
cm_a = mock_contextmanager_generator()
cm_b = mock_contextmanager_generator()
mock_nested = MockNested(cm_a, cm_b)
def shouldThrow():
with mock_nested as (self.resource_a, self.resource_b):
self.assertInWithManagerInvariants(cm_a)
self.assertInWithManagerInvariants(cm_b)
self.assertInWithManagerInvariants(mock_nested)
self.assertInWithGeneratorInvariants(self.resource_a)
self.assertInWithGeneratorInvariants(self.resource_b)
self.raiseTestException()
self.assertRaises(RuntimeError, shouldThrow)
self.assertAfterWithManagerInvariantsWithError(cm_a)
self.assertAfterWithManagerInvariantsWithError(cm_b)
self.assertAfterWithManagerInvariantsWithError(mock_nested)
self.assertAfterWithGeneratorInvariantsWithError(self.resource_a)
self.assertAfterWithGeneratorInvariantsWithError(self.resource_b)
def testNestedExceptionBeforeInnerStatement(self):
mock_a = mock_contextmanager_generator()
mock_b = mock_contextmanager_generator()
self.bar = None
def shouldThrow():
with mock_a as self.foo:
self.assertInWithManagerInvariants(mock_a)
self.assertInWithGeneratorInvariants(self.foo)
self.raiseTestException()
with mock_b as self.bar:
pass
self.assertRaises(RuntimeError, shouldThrow)
self.assertAfterWithManagerInvariantsWithError(mock_a)
self.assertAfterWithGeneratorInvariantsWithError(self.foo)
# The inner statement stuff should never have been touched
self.assertEqual(self.bar, None)
self.assertFalse(mock_b.enter_called)
self.assertFalse(mock_b.exit_called)
self.assertEqual(mock_b.exit_args, None)
def testNestedExceptionAfterInnerStatement(self):
mock_a = mock_contextmanager_generator()
mock_b = mock_contextmanager_generator()
def shouldThrow():
with mock_a as self.foo:
with mock_b as self.bar:
self.assertInWithManagerInvariants(mock_a)
self.assertInWithManagerInvariants(mock_b)
self.assertInWithGeneratorInvariants(self.foo)
self.assertInWithGeneratorInvariants(self.bar)
self.raiseTestException()
self.assertRaises(RuntimeError, shouldThrow)
self.assertAfterWithManagerInvariantsWithError(mock_a)
self.assertAfterWithManagerInvariantsNoError(mock_b)
self.assertAfterWithGeneratorInvariantsWithError(self.foo)
self.assertAfterWithGeneratorInvariantsNoError(self.bar)
def testRaisedStopIteration1(self):
# From bug 1462485
@contextmanager
def cm():
yield
def shouldThrow():
with cm():
raise StopIteration("from with")
self.assertRaises(StopIteration, shouldThrow)
def testRaisedStopIteration2(self):
# From bug 1462485
class cm(object):
def __enter__(self):
pass
def __exit__(self, type, value, traceback):
pass
def shouldThrow():
with cm():
raise StopIteration("from with")
self.assertRaises(StopIteration, shouldThrow)
def testRaisedStopIteration3(self):
# Another variant where the exception hasn't been instantiated
# From bug 1705170
@contextmanager
def cm():
yield
def shouldThrow():
with cm():
raise next(iter([]))
self.assertRaises(StopIteration, shouldThrow)
def testRaisedGeneratorExit1(self):
# From bug 1462485
@contextmanager
def cm():
yield
def shouldThrow():
with cm():
raise GeneratorExit("from with")
self.assertRaises(GeneratorExit, shouldThrow)
def testRaisedGeneratorExit2(self):
# From bug 1462485
class cm (object):
def __enter__(self):
pass
def __exit__(self, type, value, traceback):
pass
def shouldThrow():
with cm():
raise GeneratorExit("from with")
self.assertRaises(GeneratorExit, shouldThrow)
def testErrorsInBool(self):
# issue4589: __exit__ return code may raise an exception
# when looking at its truth value.
class cm(object):
def __init__(self, bool_conversion):
class Bool:
def __bool__(self):
return bool_conversion()
self.exit_result = Bool()
def __enter__(self):
return 3
def __exit__(self, a, b, c):
return self.exit_result
def trueAsBool():
with cm(lambda: True):
self.fail("Should NOT see this")
trueAsBool()
def falseAsBool():
with cm(lambda: False):
self.fail("Should raise")
self.assertRaises(AssertionError, falseAsBool)
def failAsBool():
with cm(lambda: 1//0):
self.fail("Should NOT see this")
self.assertRaises(ZeroDivisionError, failAsBool)
class NonLocalFlowControlTestCase(unittest.TestCase):
def testWithBreak(self):
counter = 0
while True:
counter += 1
with mock_contextmanager_generator():
counter += 10
break
counter += 100 # Not reached
self.assertEqual(counter, 11)
def testWithContinue(self):
counter = 0
while True:
counter += 1
if counter > 2:
break
with mock_contextmanager_generator():
counter += 10
continue
counter += 100 # Not reached
self.assertEqual(counter, 12)
def testWithReturn(self):
def foo():
counter = 0
while True:
counter += 1
with mock_contextmanager_generator():
counter += 10
return counter
counter += 100 # Not reached
self.assertEqual(foo(), 11)
def testWithYield(self):
def gen():
with mock_contextmanager_generator():
yield 12
yield 13
x = list(gen())
self.assertEqual(x, [12, 13])
def testWithRaise(self):
counter = 0
try:
counter += 1
with mock_contextmanager_generator():
counter += 10
raise RuntimeError
counter += 100 # Not reached
except RuntimeError:
self.assertEqual(counter, 11)
else:
self.fail("Didn't raise RuntimeError")
class AssignmentTargetTestCase(unittest.TestCase):
def testSingleComplexTarget(self):
targets = {1: [0, 1, 2]}
with mock_contextmanager_generator() as targets[1][0]:
self.assertEqual(list(targets.keys()), [1])
self.assertEqual(targets[1][0].__class__, MockResource)
with mock_contextmanager_generator() as list(targets.values())[0][1]:
self.assertEqual(list(targets.keys()), [1])
self.assertEqual(targets[1][1].__class__, MockResource)
with mock_contextmanager_generator() as targets[2]:
keys = list(targets.keys())
keys.sort()
self.assertEqual(keys, [1, 2])
class C: pass
blah = C()
with mock_contextmanager_generator() as blah.foo:
self.assertEqual(hasattr(blah, "foo"), True)
def testMultipleComplexTargets(self):
class C:
def __enter__(self): return 1, 2, 3
def __exit__(self, t, v, tb): pass
targets = {1: [0, 1, 2]}
with C() as (targets[1][0], targets[1][1], targets[1][2]):
self.assertEqual(targets, {1: [1, 2, 3]})
with C() as (list(targets.values())[0][2], list(targets.values())[0][1], list(targets.values())[0][0]):
self.assertEqual(targets, {1: [3, 2, 1]})
with C() as (targets[1], targets[2], targets[3]):
self.assertEqual(targets, {1: 1, 2: 2, 3: 3})
class B: pass
blah = B()
with C() as (blah.one, blah.two, blah.three):
self.assertEqual(blah.one, 1)
self.assertEqual(blah.two, 2)
self.assertEqual(blah.three, 3)
class ExitSwallowsExceptionTestCase(unittest.TestCase):
def testExitTrueSwallowsException(self):
class AfricanSwallow:
def __enter__(self): pass
def __exit__(self, t, v, tb): return True
try:
with AfricanSwallow():
1/0
except ZeroDivisionError:
self.fail("ZeroDivisionError should have been swallowed")
def testExitFalseDoesntSwallowException(self):
class EuropeanSwallow:
def __enter__(self): pass
def __exit__(self, t, v, tb): return False
try:
with EuropeanSwallow():
1/0
except ZeroDivisionError:
pass
else:
self.fail("ZeroDivisionError should have been raised")
class NestedWith(unittest.TestCase):
class Dummy(object):
def __init__(self, value=None, gobble=False):
if value is None:
value = self
self.value = value
self.gobble = gobble
self.enter_called = False
self.exit_called = False
def __enter__(self):
self.enter_called = True
return self.value
def __exit__(self, *exc_info):
self.exit_called = True
self.exc_info = exc_info
if self.gobble:
return True
class InitRaises(object):
def __init__(self): raise RuntimeError()
class EnterRaises(object):
def __enter__(self): raise RuntimeError()
def __exit__(self, *exc_info): pass
class ExitRaises(object):
def __enter__(self): pass
def __exit__(self, *exc_info): raise RuntimeError()
def testNoExceptions(self):
with self.Dummy() as a, self.Dummy() as b:
self.assertTrue(a.enter_called)
self.assertTrue(b.enter_called)
self.assertTrue(a.exit_called)
self.assertTrue(b.exit_called)
def testExceptionInExprList(self):
try:
with self.Dummy() as a, self.InitRaises():
pass
except:
pass
self.assertTrue(a.enter_called)
self.assertTrue(a.exit_called)
def testExceptionInEnter(self):
try:
with self.Dummy() as a, self.EnterRaises():
self.fail('body of bad with executed')
except RuntimeError:
pass
else:
self.fail('RuntimeError not reraised')
self.assertTrue(a.enter_called)
self.assertTrue(a.exit_called)
def testExceptionInExit(self):
body_executed = False
with self.Dummy(gobble=True) as a, self.ExitRaises():
body_executed = True
self.assertTrue(a.enter_called)
self.assertTrue(a.exit_called)
self.assertNotEqual(a.exc_info[0], None)
def testEnterReturnsTuple(self):
with self.Dummy(value=(1,2)) as (a1, a2), \
self.Dummy(value=(10, 20)) as (b1, b2):
self.assertEquals(1, a1)
self.assertEquals(2, a2)
self.assertEquals(10, b1)
self.assertEquals(20, b2)
def test_main():
run_unittest(FailureTestCase, NonexceptionalTestCase,
NestedNonexceptionalTestCase, ExceptionalTestCase,
NonLocalFlowControlTestCase,
AssignmentTargetTestCase,
ExitSwallowsExceptionTestCase,
NestedWith)
if __name__ == '__main__':
test_main()
| {
"content_hash": "601568fe0e2aa2bd2242c5eeb4bfcfd3",
"timestamp": "",
"source": "github",
"line_count": 742,
"max_line_length": 111,
"avg_line_length": 35.225067385444746,
"alnum_prop": 0.6169032406167502,
"repo_name": "MalloyPower/parsing-python",
"id": "ae2fa4d7fe4da4790990d5404fe2446c94b65346",
"size": "26160",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "front-end/testsuite-python-lib/Python-3.1/Lib/test/test_with.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "1963"
},
{
"name": "Lex",
"bytes": "238458"
},
{
"name": "Makefile",
"bytes": "4513"
},
{
"name": "OCaml",
"bytes": "412695"
},
{
"name": "Python",
"bytes": "17319"
},
{
"name": "Rascal",
"bytes": "523063"
},
{
"name": "Yacc",
"bytes": "429659"
}
],
"symlink_target": ""
} |
""" Chemistry utilities
:Author: Jonathan Karr <[email protected]>
:Date: 2018-02-07
:Copyright: 2018, Karr Lab
:License: MIT
"""
import attrdict
import mendeleev
import re
try:
import openbabel
except ModuleNotFoundError: # pragma: no cover
pass # pragma: no cover
class EmpiricalFormula(attrdict.AttrDefault):
""" An empirical formula """
def __init__(self, value=''):
"""
Args:
value (:obj:`dict` or :obj:`str`): dictionary or string representation of the formula
Raises:
:obj:`ValueError`: if :obj:`value` is not a valid formula
"""
super(EmpiricalFormula, self).__init__(float)
if isinstance(value, (dict, attrdict.AttrDict, attrdict.AttrDefault)):
for element, coefficient in value.items():
self[element] = coefficient
else:
if not re.match(r'^(([A-Z][a-z]?)(\-?[0-9]+(\.?[0-9]*)?(e[\-\+]?[0-9]*)?)?)*$', value):
raise ValueError('"{}" is not a valid formula'.format(value))
for element, coefficient, _, _ in re.findall(r'([A-Z][a-z]?)(\-?[0-9]+(\.?[0-9]*)?(e[\-\+]?[0-9]*)?)?', value):
self[element] += float(coefficient or '1')
def __setitem__(self, element, coefficient):
""" Set the count of an element
Args:
element (:obj:`str`): element symbol
coefficient (:obj:`float`): element coefficient
Raises:
:obj:`ValueError`: if the coefficient is not a float
"""
if not re.match(r'^[A-Z][a-z]?$', element):
raise ValueError('Element must be a one or two letter string')
try:
coefficient = float(coefficient)
except ValueError:
raise ValueError('Coefficient must be a float')
super(EmpiricalFormula, self).__setitem__(element, coefficient)
if coefficient == 0.:
self.pop(element)
def get_molecular_weight(self):
""" Get the molecular weight
Returns:
:obj:`float`: molecular weight
"""
mw = 0.
for element, coefficient in self.items():
mw += mendeleev.element(element).atomic_weight * coefficient
return mw
def __str__(self):
""" Generate a string representation of the formula """
vals = []
for element, coefficient in self.items():
if coefficient == 0.:
pass # pragma: no cover # unreachable due to `__setitem__`
elif coefficient == 1.:
vals.append(element)
elif coefficient == int(coefficient):
vals.append(element + str(int(coefficient)))
else:
vals.append(element + str(coefficient))
vals.sort()
return ''.join(vals)
def __contains__(self, element):
"""
Args:
element (:obj:`str`): element symbol
Returns:
:obj:`bool`: :obj:`True` if the empirical formula contains the element
"""
return re.match(r'^[A-Z][a-z]?$', element) is not None
def __add__(self, other):
""" Add two empirical formulae
Args:
other (:obj:`EmpiricalFormula` or :obj:`str`): another empirical formula
Returns:
:obj:`EmpiricalFormula`: sum of the empirical formulae
"""
if isinstance(other, str):
other = EmpiricalFormula(other)
sum = EmpiricalFormula()
for element, coefficient in self.items():
sum[element] = coefficient
for element, coefficient in other.items():
sum[element] += coefficient
return sum
def __sub__(self, other):
""" Subtract two empirical formulae
Args:
other (:obj:`EmpiricalFormula` or :obj:`str`): another empirical formula
Returns:
:obj:`EmpiricalFormula`: difference of the empirical formulae
"""
if isinstance(other, str):
other = EmpiricalFormula(other)
diff = EmpiricalFormula()
for element, coefficient in self.items():
diff[element] = coefficient
for element, coefficient in other.items():
diff[element] -= coefficient
return diff
def __mul__(self, quantity):
""" Subtract two empirical formulae
Args:
quantity (:obj:`float`)
Returns:
:obj:`EmpiricalFormula`: multiplication of the empirical formula by :obj:`quantity`
"""
result = EmpiricalFormula()
for element, coefficient in self.items():
result[element] = quantity * coefficient
return result
def __div__(self, quantity):
""" Subtract two empirical formulae (for Python 2)
Args:
quantity (:obj:`float`)
Returns:
:obj:`EmpiricalFormula`: division of the empirical formula by :obj:`quantity`
"""
return self.__truediv__(quantity) # pragma: no cover # only used in Python 2
def __truediv__(self, quantity):
""" Subtract two empirical formulae
Args:
quantity (:obj:`float`)
Returns:
:obj:`EmpiricalFormula`: division of the empirical formula by :obj:`quantity`
"""
result = EmpiricalFormula()
for element, coefficient in self.items():
result[element] = coefficient / quantity
return result
def __hash__(self):
""" Generate a hash
Returns:
:obj:`int`: hash
"""
return hash(str(self))
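# Illustrative usage sketch (not part of the original module; the function name
# is hypothetical). It exercises the parsing, arithmetic, and formatting defined
# above; the molecular weight noted in the comment is approximate.
def _empirical_formula_example():
    glucose = EmpiricalFormula('C6H12O6')
    water = EmpiricalFormula('H2O')
    assert str(glucose - water) == 'C6H10O5'
    assert str(glucose * 2) == 'C12H24O12'
    assert str(glucose / 2) == 'C3H6O3'
    return glucose.get_molecular_weight()  # about 180 for glucose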
class OpenBabelUtils(object):
@staticmethod
def get_formula(mol):
""" Get the formula of an OpenBabel molecule
Args:
mol (:obj:`openbabel.OBMol`): molecule
Returns:
:obj:`EmpiricalFormula`: formula
"""
return EmpiricalFormula(mol.GetFormula().strip('-+'))
@classmethod
def get_inchi(cls, mol, options=('r', 'F',)):
""" Get the InChI-encoded structure of an OpenBabel molecule
Args:
mol (:obj:`openbabel.OBMol`): molecule
options (:obj:`list` of :obj:`str`, optional): export options
Returns:
:obj:`str`: InChI-encoded structure
"""
conversion = openbabel.OBConversion()
assert conversion.SetOutFormat('inchi'), 'Unable to set format to InChI'
for option in options:
conversion.SetOptions(option, conversion.OUTOPTIONS)
inchi = conversion.WriteString(mol, True)
inchi = inchi.replace('InChI=1/', 'InChI=1S/')
inchi = inchi.partition('/f')[0]
return inchi
@classmethod
def get_smiles(cls, mol, options=()):
""" Get the SMILES-encoded structure of an OpenBabel molecule
Args:
mol (:obj:`openbabel.OBMol`): molecule
options (:obj:`list` of :obj:`str`, optional): export options
Returns:
:obj:`str`: SMILES-encoded structure
"""
conversion = openbabel.OBConversion()
assert conversion.SetOutFormat('smiles'), 'Unable to set format to Daylight SMILES'
for option in options:
conversion.SetOptions(option, conversion.OUTOPTIONS)
return conversion.WriteString(mol).partition('\t')[0].strip()
@classmethod
def export(cls, mol, format, options=()):
""" Export an OpenBabel molecule to format
Args:
mol (:obj:`openbabel.OBMol`): molecule
format (:obj:`str`): format
options (:obj:`list` of :obj:`str`, optional): export options
Returns:
:obj:`str`: format representation of molecule
"""
if format == 'inchi':
return cls.get_inchi(mol, options=options)
if format in ['smi', 'smiles']:
return cls.get_smiles(mol, options=options)
conversion = openbabel.OBConversion()
assert conversion.SetOutFormat(format), 'Unable to set format to {}'.format(format)
for option in options:
conversion.SetOptions(option, conversion.OUTOPTIONS)
return conversion.WriteString(mol, True)
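# Illustrative usage sketch (not part of the original module; the function name
# and the acetic-acid SMILES are placeholders). It relies on the optional
# openbabel import at the top of this file.
def _openbabel_example(smiles='CC(=O)O'):
    conversion = openbabel.OBConversion()
    assert conversion.SetInFormat('smi'), 'Unable to set format to SMILES'
    mol = openbabel.OBMol()
    conversion.ReadString(mol, smiles)
    return {
        'formula': OpenBabelUtils.get_formula(mol),
        'inchi': OpenBabelUtils.get_inchi(mol),
        'smiles': OpenBabelUtils.get_smiles(mol),
    }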
| {
"content_hash": "99543be5d22c9f5220f3aabeef989410",
"timestamp": "",
"source": "github",
"line_count": 262,
"max_line_length": 123,
"avg_line_length": 31.251908396946565,
"alnum_prop": 0.5675378602833415,
"repo_name": "KarrLab/wc_utils",
"id": "a5d06154d7231aa1e3a95782d72a7cd647409e53",
"size": "8188",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "wc_utils/util/chem/core.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Java",
"bytes": "6784"
},
{
"name": "Python",
"bytes": "415285"
},
{
"name": "Shell",
"bytes": "213"
}
],
"symlink_target": ""
} |
import datetime
import logging
import time
import pathlib
from typing import Any, Dict, List, Optional, Union
import requests
import lxml.etree
from .togetter import Togetter
from .togetter_page import TogetterPage
from .tweet import Tweet
class TogetterPageParser(object):
def __init__(
self,
page_id: int,
session: requests.sessions.Session = None,
logger: logging.Logger = None) -> None:
"""Initialize
Args:
page_id (int): the ID of the togetter page.
session (requests.sessions.Session) optional:
A Requests Session.
Defaults to None. Then new Session will be created.
logger (logging.Logger) optional:
Logger.
Defaults to None. Then new Logger will be created."""
        # logger setup
if logger is None:
logger = logging.getLogger(__name__)
self._logger = logger
# Wait Time
self._wait_time = 1.0
# get Initial Page
self._initial_page = TogetterPage(
page_id,
page_number=1,
session=session,
logger=logger)
# Page List
self._page_list = None # type: Optional[List[TogetterPage]]
# Tweet List
self._tweet_list = None # type: Optional[List[Tweet]]
def load_page(self) -> None:
"""Load all the pages of this togetter ID."""
if self._page_list is None:
self._page_list = []
self._page_list.append(self._initial_page)
while True:
next_page = self._page_list[-1].next_page()
if next_page is None:
break
self._page_list.append(next_page)
time.sleep(self.wait_time)
def get_tweet_list(self) -> List[Tweet]:
"""Get Tweet list from all the pages.
Returns:
list[Tweet]"""
if self._tweet_list is None:
if self._page_list is None:
self.load_page()
self._tweet_list = []
for page in self._page_list:
self._tweet_list.extend(page.get_tweet_list())
return self._tweet_list
def parse(self) -> Togetter:
"""create Togetter of this togetter page ID.
Returns:
Togetter"""
kwargs: Dict[str, Any] = {}
kwargs['title'] = self._initial_page.title
kwargs['page_id'] = self._initial_page.page_id
kwargs['url'] = self._initial_page.url
kwargs['access_timestamp'] = datetime.datetime.today().timestamp()
kwargs['tweet_list'] = self.get_tweet_list()
return Togetter(**kwargs)
@property
def wait_time(self) -> float:
return self._wait_time
@wait_time.setter
def wait_time(self, value: float):
self._wait_time = value
self._logger.debug(
'set Wait Time: {0} seconds'.format(self._wait_time))
@classmethod
def save_as_xml(
cls,
page_id: int,
filepath: Union[str, pathlib.Path],
logger: logging.Logger = None):
"""load Togetter pages, and output in the file as XML.
Args:
page_id (int): the ID of the togetter page.
filepath (str, pathlib.Path): the path of the file to be output as XML.
logger (logging.Logger) optional:
Logger.
Defaults to None. Then new Logger will be created.
"""
parser = TogetterPageParser(page_id, logger=logger)
parser.parse().save_as_xml(filepath)
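# Illustrative usage sketch (not part of the original module; the page ID and
# the output filename are placeholders).
def _togetter_example(page_id=123456):
    parser = TogetterPageParser(page_id)
    parser.wait_time = 2.0                 # pause between page fetches
    togetter = parser.parse()              # walks every page, collects tweets
    togetter.save_as_xml('togetter_{0}.xml'.format(page_id))
    return len(parser.get_tweet_list())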
| {
"content_hash": "4241ed15a2d26fa74943e2eab325cf49",
"timestamp": "",
"source": "github",
"line_count": 110,
"max_line_length": 79,
"avg_line_length": 33.18181818181818,
"alnum_prop": 0.5512328767123288,
"repo_name": "085astatine/togetter",
"id": "362841a10411fd5c559fc78b83f5735cf8d16277",
"size": "3680",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "togetter/togetter_page_parser.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "27222"
}
],
"symlink_target": ""
} |
"""Test the Pentair ScreenLogic config flow."""
from unittest.mock import patch
from screenlogicpy import ScreenLogicError
from screenlogicpy.const import (
SL_GATEWAY_IP,
SL_GATEWAY_NAME,
SL_GATEWAY_PORT,
SL_GATEWAY_SUBTYPE,
SL_GATEWAY_TYPE,
)
from homeassistant import config_entries
from homeassistant.components.dhcp import HOSTNAME, IP_ADDRESS
from homeassistant.components.screenlogic.config_flow import (
GATEWAY_MANUAL_ENTRY,
GATEWAY_SELECT_KEY,
)
from homeassistant.components.screenlogic.const import (
DEFAULT_SCAN_INTERVAL,
DOMAIN,
MIN_SCAN_INTERVAL,
)
from homeassistant.const import CONF_IP_ADDRESS, CONF_PORT, CONF_SCAN_INTERVAL
from tests.common import MockConfigEntry
async def test_flow_discovery(hass):
"""Test the flow works with basic discovery."""
with patch(
"homeassistant.components.screenlogic.config_flow.discovery.async_discover",
return_value=[
{
SL_GATEWAY_IP: "1.1.1.1",
SL_GATEWAY_PORT: 80,
SL_GATEWAY_TYPE: 12,
SL_GATEWAY_SUBTYPE: 2,
SL_GATEWAY_NAME: "Pentair: 01-01-01",
},
],
):
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_USER}
)
assert result["type"] == "form"
assert result["errors"] == {}
assert result["step_id"] == "gateway_select"
with patch(
"homeassistant.components.screenlogic.async_setup", return_value=True
) as mock_setup, patch(
"homeassistant.components.screenlogic.async_setup_entry",
return_value=True,
) as mock_setup_entry:
result2 = await hass.config_entries.flow.async_configure(
result["flow_id"], user_input={GATEWAY_SELECT_KEY: "00:c0:33:01:01:01"}
)
await hass.async_block_till_done()
assert result2["type"] == "create_entry"
assert result2["title"] == "Pentair: 01-01-01"
assert result2["data"] == {
CONF_IP_ADDRESS: "1.1.1.1",
CONF_PORT: 80,
}
assert len(mock_setup.mock_calls) == 1
assert len(mock_setup_entry.mock_calls) == 1
async def test_flow_discover_none(hass):
"""Test when nothing is discovered."""
with patch(
"homeassistant.components.screenlogic.config_flow.discovery.async_discover",
return_value=[],
):
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_USER}
)
assert result["type"] == "form"
assert result["errors"] == {}
assert result["step_id"] == "gateway_entry"
async def test_flow_discover_error(hass):
"""Test when discovery errors."""
with patch(
"homeassistant.components.screenlogic.config_flow.discovery.async_discover",
side_effect=ScreenLogicError("Fake error"),
):
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_USER}
)
assert result["type"] == "form"
assert result["errors"] == {}
assert result["step_id"] == "gateway_entry"
with patch(
"homeassistant.components.screenlogic.async_setup", return_value=True
) as mock_setup, patch(
"homeassistant.components.screenlogic.async_setup_entry",
return_value=True,
) as mock_setup_entry, patch(
"homeassistant.components.screenlogic.config_flow.login.create_socket",
return_value=True,
), patch(
"homeassistant.components.screenlogic.config_flow.login.gateway_connect",
return_value="00-C0-33-01-01-01",
):
result3 = await hass.config_entries.flow.async_configure(
result["flow_id"],
{
CONF_IP_ADDRESS: "1.1.1.1",
CONF_PORT: 80,
},
)
await hass.async_block_till_done()
assert result3["type"] == "create_entry"
assert result3["title"] == "Pentair: 01-01-01"
assert result3["data"] == {
CONF_IP_ADDRESS: "1.1.1.1",
CONF_PORT: 80,
}
assert len(mock_setup.mock_calls) == 1
assert len(mock_setup_entry.mock_calls) == 1
async def test_dhcp(hass):
"""Test DHCP discovery flow."""
result = await hass.config_entries.flow.async_init(
DOMAIN,
context={"source": config_entries.SOURCE_DHCP},
data={
HOSTNAME: "Pentair: 01-01-01",
IP_ADDRESS: "1.1.1.1",
},
)
assert result["type"] == "form"
assert result["step_id"] == "gateway_entry"
with patch(
"homeassistant.components.screenlogic.async_setup", return_value=True
) as mock_setup, patch(
"homeassistant.components.screenlogic.async_setup_entry",
return_value=True,
) as mock_setup_entry, patch(
"homeassistant.components.screenlogic.config_flow.login.create_socket",
return_value=True,
), patch(
"homeassistant.components.screenlogic.config_flow.login.gateway_connect",
return_value="00-C0-33-01-01-01",
):
result3 = await hass.config_entries.flow.async_configure(
result["flow_id"],
{
CONF_IP_ADDRESS: "1.1.1.1",
CONF_PORT: 80,
},
)
await hass.async_block_till_done()
assert result3["type"] == "create_entry"
assert result3["title"] == "Pentair: 01-01-01"
assert result3["data"] == {
CONF_IP_ADDRESS: "1.1.1.1",
CONF_PORT: 80,
}
assert len(mock_setup.mock_calls) == 1
assert len(mock_setup_entry.mock_calls) == 1
async def test_form_manual_entry(hass):
"""Test we get the form."""
with patch(
"homeassistant.components.screenlogic.config_flow.discovery.async_discover",
return_value=[
{
SL_GATEWAY_IP: "1.1.1.1",
SL_GATEWAY_PORT: 80,
SL_GATEWAY_TYPE: 12,
SL_GATEWAY_SUBTYPE: 2,
SL_GATEWAY_NAME: "Pentair: 01-01-01",
},
],
):
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_USER}
)
assert result["type"] == "form"
assert result["errors"] == {}
assert result["step_id"] == "gateway_select"
result2 = await hass.config_entries.flow.async_configure(
result["flow_id"], user_input={GATEWAY_SELECT_KEY: GATEWAY_MANUAL_ENTRY}
)
assert result2["type"] == "form"
assert result2["errors"] == {}
assert result2["step_id"] == "gateway_entry"
with patch(
"homeassistant.components.screenlogic.async_setup", return_value=True
) as mock_setup, patch(
"homeassistant.components.screenlogic.async_setup_entry",
return_value=True,
) as mock_setup_entry, patch(
"homeassistant.components.screenlogic.config_flow.login.create_socket",
return_value=True,
), patch(
"homeassistant.components.screenlogic.config_flow.login.gateway_connect",
return_value="00-C0-33-01-01-01",
):
result3 = await hass.config_entries.flow.async_configure(
result["flow_id"],
{
CONF_IP_ADDRESS: "1.1.1.1",
CONF_PORT: 80,
},
)
await hass.async_block_till_done()
assert result3["type"] == "create_entry"
assert result3["title"] == "Pentair: 01-01-01"
assert result3["data"] == {
CONF_IP_ADDRESS: "1.1.1.1",
CONF_PORT: 80,
}
assert len(mock_setup.mock_calls) == 1
assert len(mock_setup_entry.mock_calls) == 1
async def test_form_cannot_connect(hass):
"""Test we handle cannot connect error."""
with patch(
"homeassistant.components.screenlogic.config_flow.discovery.async_discover",
return_value=[],
):
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_USER}
)
with patch(
"homeassistant.components.screenlogic.config_flow.login.create_socket",
return_value=None,
):
result2 = await hass.config_entries.flow.async_configure(
result["flow_id"],
{
CONF_IP_ADDRESS: "1.1.1.1",
CONF_PORT: 80,
},
)
assert result2["type"] == "form"
assert result2["errors"] == {CONF_IP_ADDRESS: "cannot_connect"}
async def test_option_flow(hass):
"""Test config flow options."""
entry = MockConfigEntry(domain=DOMAIN)
entry.add_to_hass(hass)
with patch(
"homeassistant.components.screenlogic.async_setup", return_value=True
), patch(
"homeassistant.components.screenlogic.async_setup_entry",
return_value=True,
):
await hass.config_entries.async_setup(entry.entry_id)
await hass.async_block_till_done()
result = await hass.config_entries.options.async_init(entry.entry_id)
assert result["type"] == "form"
assert result["step_id"] == "init"
result = await hass.config_entries.options.async_configure(
result["flow_id"],
user_input={CONF_SCAN_INTERVAL: 15},
)
assert result["type"] == "create_entry"
assert result["data"] == {CONF_SCAN_INTERVAL: 15}
async def test_option_flow_defaults(hass):
"""Test config flow options."""
entry = MockConfigEntry(domain=DOMAIN)
entry.add_to_hass(hass)
with patch(
"homeassistant.components.screenlogic.async_setup", return_value=True
), patch(
"homeassistant.components.screenlogic.async_setup_entry",
return_value=True,
):
await hass.config_entries.async_setup(entry.entry_id)
await hass.async_block_till_done()
result = await hass.config_entries.options.async_init(entry.entry_id)
assert result["type"] == "form"
assert result["step_id"] == "init"
result = await hass.config_entries.options.async_configure(
result["flow_id"], user_input={}
)
assert result["type"] == "create_entry"
assert result["data"] == {
CONF_SCAN_INTERVAL: DEFAULT_SCAN_INTERVAL,
}
async def test_option_flow_input_floor(hass):
"""Test config flow options."""
entry = MockConfigEntry(domain=DOMAIN)
entry.add_to_hass(hass)
with patch(
"homeassistant.components.screenlogic.async_setup", return_value=True
), patch(
"homeassistant.components.screenlogic.async_setup_entry",
return_value=True,
):
await hass.config_entries.async_setup(entry.entry_id)
await hass.async_block_till_done()
result = await hass.config_entries.options.async_init(entry.entry_id)
assert result["type"] == "form"
assert result["step_id"] == "init"
result = await hass.config_entries.options.async_configure(
result["flow_id"], user_input={CONF_SCAN_INTERVAL: 1}
)
assert result["type"] == "create_entry"
assert result["data"] == {
CONF_SCAN_INTERVAL: MIN_SCAN_INTERVAL,
}
| {
"content_hash": "6d5e811e143cf3255e7bd5bbee7640f1",
"timestamp": "",
"source": "github",
"line_count": 348,
"max_line_length": 84,
"avg_line_length": 31.78735632183908,
"alnum_prop": 0.6117338636774543,
"repo_name": "aronsky/home-assistant",
"id": "d1333cb751421b2c492ae39e82f470a61a9e88f3",
"size": "11062",
"binary": false,
"copies": "2",
"ref": "refs/heads/dev",
"path": "tests/components/screenlogic/test_config_flow.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "2443"
},
{
"name": "Python",
"bytes": "38448521"
},
{
"name": "Shell",
"bytes": "4910"
}
],
"symlink_target": ""
} |
from django.contrib.gis import admin
from survey.models import GravelSite, Pit, InputNode, Question, Context, QuestionCategory, BifSettings, PitScoreWeight, PitQuestionAnswer
admin.site.register(GravelSite, admin.GeoModelAdmin)
admin.site.register(Pit, admin.GeoModelAdmin)
admin.site.register(InputNode, admin.ModelAdmin)
# admin.site.register(MapLayer, admin.ModelAdmin)
# admin.site.register(BifSettings, admin.ModelAdmin)
class ContextAdmin(admin.ModelAdmin):
list_display = ('name', 'order')
list_filter = ['name', 'order']
ordering = ['order', 'name']
class CategoryAdmin(admin.ModelAdmin):
list_display = ('name', 'context', 'order')
list_filter = ['name', 'context', 'order']
ordering = ['context__order', 'order', 'name']
class QuestionAdmin(admin.ModelAdmin):
list_display = ('title', 'question', 'order', 'questionCategory')
list_filter = ['title', 'question', 'order', 'questionCategory']
ordering = ['order', 'title', 'questionCategory']
admin.site.register(Context, ContextAdmin)
admin.site.register(QuestionCategory, CategoryAdmin)
admin.site.register(Question, QuestionAdmin)
class PitAnswerInline(admin.TabularInline):
model = PitQuestionAnswer
class PitScoreAdmin(admin.ModelAdmin):
list_display = ('score', 'visible','disabled','value','questionText','order')
list_filter = ['visible','disabled','score','value','questionText']
ordering = ['order','score', 'value','questionText']
inlines = [
PitAnswerInline,
]
admin.site.register(PitScoreWeight, PitScoreAdmin)
class BifAdmin(admin.ModelAdmin):
# def changelist_view(self, request):
# return survey_views.edit_bbn(self, request)
list_display = ('notes', 'user', 'date_modified')
def change_view(self, request, bifid):
from survey import views as survey_views
return survey_views.admin_change_form(self, request, bifid)
def add_view(self, request):
from survey import views as survey_views
return survey_views.admin_add_form(self, request)
def get_urls(self):
from django.conf.urls import patterns
# Set up the URLS dynamically
urls = super(BifAdmin, self).get_urls()
my_urls = patterns('',
# ('^$', self.changelist_view),
            (r'^(?P<bifid>\d+)/$', self.change_view),
('^add/$', self.add_view),
)
return my_urls + urls
def save_model(self, request, obj, form, change):
try:
obj.save()
from survey.views import update_bbn_bif
update_bbn_bif(obj, request.POST)
except:
pass
admin.site.register(BifSettings, BifAdmin)
| {
"content_hash": "66a63b15cc4d4afcd982ece02181e8e8",
"timestamp": "",
"source": "github",
"line_count": 78,
"max_line_length": 137,
"avg_line_length": 34.92307692307692,
"alnum_prop": 0.6563876651982379,
"repo_name": "Ecotrust/floodplain-restoration",
"id": "049eae2e030b2ec025606e7738d8d7e84f679937",
"size": "2724",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "dst/survey/admin.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "18811"
},
{
"name": "HTML",
"bytes": "91644"
},
{
"name": "JavaScript",
"bytes": "128271"
},
{
"name": "Python",
"bytes": "182327"
},
{
"name": "Shell",
"bytes": "4981"
}
],
"symlink_target": ""
} |
import unittest
import httpretty
from u2fval_client.client import (
Client,
)
from u2fval_client.exc import (
BadAuthException,
BadInputException,
ServerUnreachableException,
InvalidResponseException,
U2fValClientException,
)
@httpretty.activate
class TestClient(unittest.TestCase):
def setUp(self):
self.client = Client('https://example')
def test_endpoint_sanitised(self):
self.assertEqual('https://example/', self.client._endpoint)
def test_get_trusted_facets(self):
httpretty.register_uri('GET', 'https://example/',
body='{}')
self.assertEqual(self.client.get_trusted_facets(), {})
def test_get_trusted_facets_empty_response_body(self):
httpretty.register_uri('GET', 'https://example/',
body='')
self.assertRaises(InvalidResponseException,
self.client.get_trusted_facets)
def test_get_trusted_facets_error_code(self):
httpretty.register_uri('GET', 'https://example/',
body='{"errorCode": 10}',
status=400)
self.assertRaises(BadInputException, self.client.get_trusted_facets)
def test_get_trusted_facets_unauthorized(self):
httpretty.register_uri('GET', 'https://example/', status=401)
self.assertRaises(BadAuthException, self.client.get_trusted_facets)
def test_get_trusted_facets_not_found(self):
httpretty.register_uri('GET', 'https://example/', status=404)
self.assertRaises(U2fValClientException, self.client.get_trusted_facets)
def test_get_trusted_facets_internal_server_error(self):
httpretty.register_uri('GET', 'https://example/', status=500)
self.assertRaises(U2fValClientException, self.client.get_trusted_facets)
def test_get_trusted_facets_server_unreachable(self):
# Intentionally has no httpretty mock registered
self.assertRaises(ServerUnreachableException,
self.client.get_trusted_facets)
def test_list_devices(self):
httpretty.register_uri('GET', 'https://example/black_knight/',
body='{}')
self.assertEqual(self.client.list_devices('black_knight'), {})
def test_register_begin(self):
httpretty.register_uri('GET', 'https://example/black_knight/register',
body='{}')
self.assertEqual(self.client.register_begin('black_knight'), {})
def test_register_complete(self):
httpretty.register_uri('POST', 'https://example/black_knight/register',
body='{}')
self.assertEqual(self.client.register_complete('black_knight', '{}'),
{})
req = httpretty.last_request()
self.assertEqual(req.parsed_body, {'registerResponse': {}})
def test_unregister(self):
httpretty.register_uri('DELETE', 'https://example/black_knight/abc123',
body='', status=204)
self.assertIsNone(self.client.unregister('black_knight', 'abc123'))
def test_auth_begin(self):
httpretty.register_uri('GET', 'https://example/black_knight/sign',
body='{}')
self.assertEqual(self.client.auth_begin('black_knight'), {})
def test_auth_complete(self):
httpretty.register_uri('POST', 'https://example/black_knight/sign',
body='{}')
self.assertEqual(self.client.auth_complete('black_knight', '{}'), {})
req = httpretty.last_request()
self.assertEqual(req.parsed_body, {'signResponse': {}})
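# Illustrative sketch (not part of the test suite; the endpoint and username
# are placeholders) of the round trips the mocked tests above exercise.
# Calling it would hit a real u2fval server, which is why it is only a sketch.
def _client_usage_sketch():
    client = Client('https://u2fval.example.com/')
    register_request = client.register_begin('black_knight')
    # ...sign register_request with the U2F device, then:
    # client.register_complete('black_knight', register_response_json)
    sign_request = client.auth_begin('black_knight')
    # ...sign sign_request with the device, then:
    # client.auth_complete('black_knight', sign_response_json)
    return register_request, sign_request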
| {
"content_hash": "91f6d677ea0b231db89f211e7777291a",
"timestamp": "",
"source": "github",
"line_count": 92,
"max_line_length": 80,
"avg_line_length": 40.22826086956522,
"alnum_prop": 0.6098351796811673,
"repo_name": "Yubico/u2fval-client-python",
"id": "0db0fb0a250adb6310b589d3fcf28ad11b7c28f3",
"size": "3701",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test/test_client.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "27878"
}
],
"symlink_target": ""
} |
import sys
import pickle
import os
import shutil
_current_dir = os.path.abspath(os.path.dirname(__file__))
import_path = os.path.join(_current_dir, '..')
sys.path.append(import_path)
from lib.common import list_file_paths
from lib.pdf_genome import PdfGenome
from lib.detector import query_classifier
class ExtGenome:
def __init__(self, classifier_name, folder, file_number):
self.classifier_func = lambda *args:query_classifier(classifier_name, *args)
self.folder = folder
self.fpaths = list_file_paths(self.folder)
self.file_number = file_number
def classifier(self, *args):
return self.classifier_func(*args)
def path_count(self, file_paths):
ret = []
for fpath in file_paths:
pdf_obj = PdfGenome.load_genome(fpath)
paths = PdfGenome.get_object_paths(pdf_obj)
ret.append(len(paths))
return ret
# Note: I don't think the score of externals is that important. What really matters is the diversity of the structure.
def select_files(self):
file_paths = self.fpaths
limit = self.file_number
classifier_results = self.classifier(file_paths)
path_count = self.path_count(file_paths)
file_size = map(os.path.getsize, file_paths)
file_size = map(lambda x:x/float(1024), file_size)
chose_idx = sorted(range(len(classifier_results)), key=lambda i: (classifier_results[i], file_size[i]))
for idx in chose_idx:
print ("Score: %.2f, Path_count: %d, File_size: %.1f KB, Name: %s" % (classifier_results[idx], path_count[idx], file_size[idx], os.path.basename(file_paths[idx])))
chose_idx = chose_idx[:limit]
print ("Chose %d external files." % (limit))
file_paths_sub = [file_paths[i] for i in chose_idx]
return file_paths_sub
def load_external_genome(self, file_paths):
ext_pdf_paths = [] # element: (entry, path)
self.genome_desc = []
for file_path in file_paths:
pdf_obj = PdfGenome.load_genome(file_path)
paths = PdfGenome.get_object_paths(pdf_obj)
for path in paths:
ext_pdf_paths.append((pdf_obj, path))
self.genome_desc.append((file_path, len(path)))
return ext_pdf_paths
@staticmethod
def copy_file_to_folders(flist, tgt_folder):
if not os.path.isdir(tgt_folder):
os.makedirs(tgt_folder)
for fpath in flist:
shutil.copy2(fpath, tgt_folder)
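# Illustrative usage sketch (not part of the original script; the classifier
# name and sample folder are placeholders for whatever the deployment uses).
def _ext_genome_example():
    geno = ExtGenome('some_classifier', '../samples/benign', 40)
    chosen = geno.select_files()                   # ranked by score, then size
    ext_paths = geno.load_external_genome(chosen)  # (pdf_obj, object_path) pairs
    return ext_paths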
if __name__ == '__main__':
if len(sys.argv) < 4:
print "./%s [classifier_name] [sample_folder] [file_number]" % (sys.argv[0])
classifier_name, sample_folder, file_limit = sys.argv[1:4]
file_limit = int(file_limit)
ext_genome_folder = os.path.join(_current_dir, "../samples/ext_genome/%s_%d_new" % (classifier_name, file_limit))
ext_genome_folder = os.path.abspath(ext_genome_folder)
pdf_geno = ExtGenome(classifier_name, sample_folder, file_limit)
selected_files = pdf_geno.select_files()
answer = raw_input("Do you want to copy the %d selected files to %s? [y/N]" % (len(selected_files), ext_genome_folder))
if answer == 'y':
        ExtGenome.copy_file_to_folders(selected_files, ext_genome_folder)
| {
"content_hash": "0aec7ff3d56b2e086549722352d4b30f",
"timestamp": "",
"source": "github",
"line_count": 85,
"max_line_length": 175,
"avg_line_length": 38.64705882352941,
"alnum_prop": 0.6316590563165906,
"repo_name": "uvasrg/EvadeML",
"id": "a76c167686393a017ab0cd4ff33a6ef2cbc08579",
"size": "3309",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "utils/generate_ext_genome.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "326041"
},
{
"name": "HTML",
"bytes": "249194"
},
{
"name": "JavaScript",
"bytes": "80804"
},
{
"name": "Makefile",
"bytes": "634"
},
{
"name": "Python",
"bytes": "102366"
},
{
"name": "Ruby",
"bytes": "1420"
}
],
"symlink_target": ""
} |
"""Creates an AndroidManifest.xml for an incremental APK.
Given the manifest file for the real APK, generates an AndroidManifest.xml with
the application class changed to IncrementalApplication.
"""
import argparse
import os
import sys
from xml.etree import ElementTree
sys.path.append(os.path.join(os.path.dirname(__file__), os.path.pardir, 'gyp'))
from util import build_utils
from util import manifest_utils
_INCREMENTAL_APP_NAME = 'org.chromium.incrementalinstall.BootstrapApplication'
_META_DATA_APP_NAME = 'incremental-install-real-app'
_DEFAULT_APPLICATION_CLASS = 'android.app.Application'
_META_DATA_INSTRUMENTATION_NAMES = [
'incremental-install-real-instrumentation-0',
'incremental-install-real-instrumentation-1',
]
_INCREMENTAL_INSTRUMENTATION_CLASSES = [
'android.app.Instrumentation',
'org.chromium.incrementalinstall.SecondInstrumentation',
]
def _AddNamespace(name):
"""Adds the android namespace prefix to the given identifier."""
return '{%s}%s' % (manifest_utils.ANDROID_NAMESPACE, name)
def _ParseArgs(args):
parser = argparse.ArgumentParser()
parser.add_argument('--src-manifest',
required=True,
help='The main manifest of the app.')
parser.add_argument('--dst-manifest',
required=True,
help='The output modified manifest.')
parser.add_argument('--disable-isolated-processes',
help='Changes all android:isolatedProcess to false. '
'This is required on Android M+',
action='store_true')
ret = parser.parse_args(build_utils.ExpandFileArgs(args))
return ret
def _CreateMetaData(parent, name, value):
meta_data_node = ElementTree.SubElement(parent, 'meta-data')
meta_data_node.set(_AddNamespace('name'), name)
meta_data_node.set(_AddNamespace('value'), value)
def _ProcessManifest(path, disable_isolated_processes):
doc, _, app_node = manifest_utils.ParseManifest(path)
  # Pylint for some reason thinks app_node is an int.
# pylint: disable=no-member
real_app_class = app_node.get(_AddNamespace('name'),
_DEFAULT_APPLICATION_CLASS)
app_node.set(_AddNamespace('name'), _INCREMENTAL_APP_NAME)
# pylint: enable=no-member
_CreateMetaData(app_node, _META_DATA_APP_NAME, real_app_class)
# Seems to be a bug in ElementTree, as doc.find() doesn't work here.
instrumentation_nodes = doc.findall('instrumentation')
assert len(instrumentation_nodes) <= 2, (
'Need to update incremental install to support >2 <instrumentation> tags')
for i, instrumentation_node in enumerate(instrumentation_nodes):
real_instrumentation_class = instrumentation_node.get(_AddNamespace('name'))
instrumentation_node.set(_AddNamespace('name'),
_INCREMENTAL_INSTRUMENTATION_CLASSES[i])
_CreateMetaData(app_node, _META_DATA_INSTRUMENTATION_NAMES[i],
real_instrumentation_class)
ret = ElementTree.tostring(doc.getroot(), encoding='UTF-8')
# Disable check for page-aligned native libraries.
ret = ret.replace(b'extractNativeLibs="false"', b'extractNativeLibs="true"')
if disable_isolated_processes:
ret = ret.replace(b'isolatedProcess="true"', b'isolatedProcess="false"')
# externalService only matters for isolatedProcess="true". See:
# https://developer.android.com/reference/android/R.attr#externalService
ret = ret.replace(b'externalService="true"', b'externalService="false"')
return ret
def main(raw_args):
options = _ParseArgs(raw_args)
new_manifest_data = _ProcessManifest(options.src_manifest,
options.disable_isolated_processes)
with build_utils.AtomicOutput(options.dst_manifest) as out_manifest:
out_manifest.write(new_manifest_data)
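# Illustrative invocation sketch (not part of the original script; the manifest
# paths are placeholders). The flags mirror _ParseArgs above.
def _example_invocation(src='AndroidManifest.xml',
                        dst='incremental/AndroidManifest.xml'):
  main(['--src-manifest', src,
        '--dst-manifest', dst,
        '--disable-isolated-processes'])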
if __name__ == '__main__':
main(sys.argv[1:])
| {
"content_hash": "817b39a07221cd9418c41cca3fe250d9",
"timestamp": "",
"source": "github",
"line_count": 100,
"max_line_length": 80,
"avg_line_length": 38.9,
"alnum_prop": 0.6930591259640103,
"repo_name": "nwjs/chromium.src",
"id": "38da3dc0bb332fadc6c38b39e82b09d399ded5f9",
"size": "4055",
"binary": false,
"copies": "11",
"ref": "refs/heads/nw70",
"path": "build/android/incremental_install/generate_android_manifest.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [],
"symlink_target": ""
} |
from authkit.authenticate import middleware, sample_app
def valid(environ, username, password):
return username == password
app = middleware(
sample_app,
setup_method='basic',
basic_realm='Test Realm',
basic_authenticate_function=valid
)
if __name__ == '__main__':
from paste.httpserver import serve
serve(app, host='0.0.0.0', port=8080)
| {
"content_hash": "03e0c8dfc32df8ee3694e554af10fc5a",
"timestamp": "",
"source": "github",
"line_count": 16,
"max_line_length": 56,
"avg_line_length": 23.375,
"alnum_prop": 0.6737967914438503,
"repo_name": "nakato/AuthKit",
"id": "42219c8eec98ed0d348969e3107556eb79c849b9",
"size": "374",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "examples/docs/basic.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "320195"
}
],
"symlink_target": ""
} |
from django.core.urlresolvers import reverse
from django.http import HttpResponse
from django.test.client import Client
from apps.canvas_auth.models import User, AnonymousUser
from canvas.models import EmailUnsubscribe
from canvas.tests.tests_helpers import CanvasTestCase, NotOkay, create_content, create_user, create_group, redis, PASSWORD
from forms import EmailChangeForm, PasswordChangeForm, SubscriptionForm, SecureOnlyForm
from apps.user_settings.models import EmailConfirmation
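# Changing an email address creates an EmailConfirmation with a one-off key;
# the account only switches to the new address once the confirmation link is
# visited (or the change is cancelled via the cancellation URL).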
class TestEmailConfirmation(CanvasTestCase):
def setUp(self):
super(TestEmailConfirmation, self).setUp()
self.old_email = '[email protected]'
self.new_email = '[email protected]'
def test_change_and_confirm(self):
user = create_user(email=self.old_email)
confirmation = user.change_email(self.new_email)
self.assertEqual(user.email, self.old_email)
# Confirm it.
confirmation = EmailConfirmation.objects.confirm_email(
confirmation.confirmation_key)
# Refresh the user object after confirming.
user = User.objects.get(pk=user.pk)
# Verify the change happened.
self.assertNotEqual(confirmation, None)
self.assertEqual(confirmation.user.pk, user.pk)
self.assertEqual(confirmation.new_email, self.new_email)
self.assertFalse(confirmation.key_expired())
self.assertEqual(user.email, self.new_email)
def test_key_changes(self):
user = create_user(email=self.old_email)
confirmation = EmailConfirmation.objects.create_confirmation(
user, self.new_email)
confirmation2 = EmailConfirmation.objects.create_confirmation(
user, 'newer' + self.new_email)
self.assertNotEqual(confirmation.confirmation_key,
confirmation2.confirmation_key)
def test_key_generation(self):
key = EmailConfirmation.objects._generate_key(self.old_email)
key2 = EmailConfirmation.objects._generate_key(self.new_email)
self.assertTrue(key is not None)
self.assertTrue(key)
self.assertNotEqual(key, key2)
def test_confirmation_email_contents(self):
user = create_user(email=self.old_email)
confirmation = user.change_email(self.new_email)
subject, msg = confirmation._generate_confirmation_email()
# Make sure it has the right links with the confirmation key in the email body.
self.assertTrue(confirmation.confirmation_key in msg)
self.assertTrue(confirmation._activate_url() in msg)
def test_confirm_page(self):
user = create_user(email=self.old_email)
confirmation = user.change_email(self.new_email)
url = confirmation._activate_url()
resp = self.get(url)
self.assertTrue(resp.status_code in [302, 200])
# Now make sure that visiting the page confirmed it.
user = User.objects.get(pk=user.pk)
self.assertEqual(user.email, self.new_email)
def test_invalid_confirm_key(self):
url = reverse('apps.user_settings.views.confirm_email', args=['foo'])
resp = self.get(url)
self.assertEqual(resp.status_code, 200)
self.assertFalse(self.new_email in resp.content)
self.assertTrue('invalid' in resp.content.lower())
def test_form(self):
user = create_user(email=self.old_email)
form = EmailChangeForm(user=user, data={'email': self.new_email})
if form.is_valid():
form.save()
# Refresh the user object.
user = User.objects.get(pk=user.pk)
# Check that a confirmation was sent.
self.assertEqual(EmailConfirmation.objects.all().count(), 1)
self.assertNotEqual(user.email, self.new_email)
def test_unchanged_email_form(self):
user = create_user(email=self.old_email)
form = EmailChangeForm(user=user, data={'email': user.email})
if form.is_valid():
form.save()
# Refresh the user object.
user = User.objects.get(pk=user.pk)
# Check that a confirmation was not sent, since the email hasn't been changed.
self.assertEqual(EmailConfirmation.objects.all().count(), 0)
def test_multiple_confirmations(self):
user = create_user(email=self.old_email)
confirmation = EmailConfirmation.objects.create_confirmation(
user, 'first' + self.new_email)
confirmation = EmailConfirmation.objects.create_confirmation(
user, self.new_email)
self.assertEqual(EmailConfirmation.objects.all().count(), 1)
self.assertEqual(EmailConfirmation.objects.all()[0].new_email, self.new_email)
def test_confirmation_cancellation(self):
user = create_user(email=self.old_email)
confirmation = EmailConfirmation.objects.create_confirmation(
user, self.new_email)
self.assertEqual(EmailConfirmation.objects.all().count(), 1)
self.get(confirmation.cancellation_url(), user=user)
self.assertEqual(EmailConfirmation.objects.all().count(), 0)
def test_wrong_user_cancellation(self):
user = create_user(email=self.old_email)
user2 = create_user(email=self.old_email)
confirmation = EmailConfirmation.objects.create_confirmation(
user, self.new_email)
self.assertEqual(EmailConfirmation.objects.all().count(), 1)
try:
resp = self.get(confirmation.cancellation_url(), user=user2)
except NotOkay, e:
resp = e.response
else:
raise Exception('Cancellation URL worked with the wrong user!')
self.assertNotEqual(resp.status_code, 200)
self.assertEqual(EmailConfirmation.objects.all().count(), 1)
def test_send_confirmation(self):
user = create_user(email=self.old_email)
confirmation = EmailConfirmation.objects.create_confirmation(
user, self.new_email)
confirmation.send_confirmation()
class TestPasswordForm(CanvasTestCase):
def test_pw_length(self):
user = create_user()
for pw, success in [('a', False,), ('a' * User.MINIMUM_PASSWORD_LENGTH, True,)]:
form = PasswordChangeForm(user=user, data={
'old_password': PASSWORD,
'new_password1': pw,
'new_password2': pw,
})
self.assertEqual(form.is_valid(), success)
def test_pw_change(self):
user = create_user()
pw = 'new_pass1'
form = PasswordChangeForm(user=user, data={
'old_password': PASSWORD,
'new_password1': pw,
'new_password2': pw,
})
form.is_valid()
form.save()
self.assertTrue(form.password_changed())
def test_pw_change_frontend_notification(self):
user = create_user()
pw = 'newpw1337'
client = self.get_client(user=user)
resp = self.post(reverse('apps.user_settings.views.user_settings'), data={
'old_password': PASSWORD,
'new_password1': pw,
'new_password2': pw,
'email': user.email,
}, user=user, client=client)
self.assertTrue(client.session.get('password_updated'))
self.assertEqual(resp.status_code, 302)
resp = self.get(reverse('apps.user_settings.views.user_settings'), client=client)
self.assertContains(resp, 'password has been updated')
class TestSubscriptionForm(CanvasTestCase):
def test_subscribe(self):
email = '[email protected]'
user = create_user(email=email)
form = SubscriptionForm(user=user, data={'newsletter': 'on'})
self.assertTrue(form.is_valid())
form.save()
self.assertEqual(EmailUnsubscribe.objects.get_or_none(email=email), None)
form = SubscriptionForm(user=user, data={})
self.assertTrue(form.is_valid())
form.save()
self.assertNotEqual(EmailUnsubscribe.objects.get_or_none(email=email), None)
def test_change_email_and_unsubscribe_at_once(self):
email = '[email protected]'
new_email = '[email protected]'
user = create_user(email=email)
# Unsubscribe while changing the address
resp = self.post(reverse('apps.user_settings.views.user_settings'), data={
'email': new_email,
}, user=user)
        user = User.objects.all()[0]
# Check that the old email is unsubscribed.
EmailUnsubscribe.objects.get(email=email)
# Confirm new email.
confirmation = EmailConfirmation.objects.all()[0]
EmailConfirmation.objects.confirm_email(confirmation.confirmation_key)
# Check that the new email is now unsubscribed.
EmailUnsubscribe.objects.get(email=new_email)
class TestDisableUser(CanvasTestCase):
def test_disable(self):
user = create_user()
self.assertTrue(user.is_active)
resp = self.post(reverse('apps.user_settings.views.disable_account'), data={}, user=user)
# Refresh the user object and verify.
user = User.objects.get(pk=user.pk)
self.assertFalse(user.is_active)
self.assertRedirects(resp, '/')
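# "Secure only" forces HTTPS for an account: the preference is mirrored in a
# redis user_kv flag and a secure_only cookie, and plain-HTTP form posts get
# redirected to the HTTPS URL by the middleware.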
class TestSecureOnly(CanvasTestCase):
def test_secure_only_form(self):
user = create_user()
resp = HttpResponse()
cookies = {}
self.assertFalse(resp.cookies.get('secure_only', False))
form = SecureOnlyForm(user, cookies, resp, data={'force_https': True})
self.assertTrue(form.is_valid())
form.save()
self.assertTrue(resp.cookies['secure_only'])
form = SecureOnlyForm(user, {'secure_only': 'true'}, resp)
self.assertTrue(form.fields['force_https'].initial)
def test_secure_only_middleware(self):
user = create_user()
client = self.get_client(user=user)
url = reverse('apps.user_settings.views.user_settings')
# First set Force HTTPS
resp = self.post(url, data={
'email': user.email,
'force_https': 'on',
}, user=user, client=client)
# Now try visiting a page without the cookie, and see that it gets set.
client = self.get_client(user=user)
resp = self.get('/', user=user, client=client)
self.assertTrue(resp.cookies.get('secure_only'))
        # The redis flag should currently be set; the form posts below unset it.
self.assertTrue(int(user.redis.user_kv.hget('secure_only') or 0))
def do_form_post():
return self.post(url, data={
'email': user.email,
}, user=user, client=client, https=True)
resp = do_form_post()
self.assertRedirectsNoFollow(resp, 'https://testserver:80' + url)
# Mock SSL and re-POST
client.defaults['wsgi.url_scheme'] = 'https'
do_form_post()
# Now check it had an effect.
self.assertFalse(int(user.redis.user_kv.hget('secure_only') or 0))
def test_secure_only_middleware_anonymously(self):
self.assertStatus(200, '/', user=AnonymousUser())
| {
"content_hash": "f6782876b0e4c6e7c3b760a6cbab8d4d",
"timestamp": "",
"source": "github",
"line_count": 283,
"max_line_length": 122,
"avg_line_length": 39.265017667844525,
"alnum_prop": 0.6378689704823615,
"repo_name": "drawquest/drawquest-web",
"id": "4736fcd7cc4c8371b41f589c7377c4f5b6111ca2",
"size": "11112",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "website/apps/user_settings/tests.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "AppleScript",
"bytes": "57"
},
{
"name": "C",
"bytes": "547"
},
{
"name": "CSS",
"bytes": "634659"
},
{
"name": "CoffeeScript",
"bytes": "8968"
},
{
"name": "HTML",
"bytes": "898627"
},
{
"name": "JavaScript",
"bytes": "1507053"
},
{
"name": "Makefile",
"bytes": "258"
},
{
"name": "PHP",
"bytes": "1983"
},
{
"name": "Python",
"bytes": "7220727"
},
{
"name": "Ruby",
"bytes": "876"
},
{
"name": "Shell",
"bytes": "3700"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('search_page', '0003_auto_20160810_1856'),
]
operations = [
migrations.AddField(
model_name='searchpage',
name='boosted_search_terms',
field=models.TextField(blank=True, help_text='Words (space-separated) added here are boosted in relevance for search results increasing the chance of this appearing higher in the search results.'),
),
migrations.AddField(
model_name='searchpage',
name='list_image',
field=models.ImageField(upload_to=b'icekit/listable/list_image/', blank=True, help_text=b"image to use in listings. Default image is used if this isn't given"),
),
]
| {
"content_hash": "db8ebd58602a3c7d36c992c170d55436",
"timestamp": "",
"source": "github",
"line_count": 23,
"max_line_length": 209,
"avg_line_length": 36.43478260869565,
"alnum_prop": 0.6479713603818615,
"repo_name": "ic-labs/django-icekit",
"id": "da99191e8becd7e43ef54a0d9469a121c89b9cc2",
"size": "862",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "icekit/page_types/search_page/migrations/0004_auto_20161122_2121.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "18019"
},
{
"name": "HTML",
"bytes": "92605"
},
{
"name": "JavaScript",
"bytes": "27803"
},
{
"name": "Python",
"bytes": "1476354"
},
{
"name": "Shell",
"bytes": "37850"
}
],
"symlink_target": ""
} |
from pygame.math import Vector3 as V3
import pygame
import thorpy
from core3d import Object3D, Path3D
from light import Light, Material
from camera import Camera
import primitivemeshes
import parameters
import drawing
import vessel
import random
from scene import Scene
import levelgen
import gamelogic
import garage
import trackdecorator
import obstacle
import scenario
from core3d import ManualObject3D
################################################################################
#music / sounds (of the other vessels too, as a function of distance)
#when the category changes:
# -crowd noise (during the starting lights) changes
#statistics
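# create_vessel builds a randomized ship: hull parts and wings come from the
# garage generators, engine power and friction are drawn together, and fuel
# capacity trades off against turning ability, scaled by CURRENT_QUALITY.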
def create_vessel(color):
quality = parameters.CURRENT_QUALITY
glass = Material((0,0,0),M=(120,120,120))
rest = Material(color)
t,n,c = garage.generate_vessel(rest, glass)
w = garage.random_wing(rest)
v = vessel.Vessel(None,more_triangles=[])
#quality = power*friction
#quality = turn+max_fuel
power_rand = random.random()+0.000001
power = parameters.MIN_POWER + power_rand*(parameters.MAX_POWER-parameters.MIN_POWER)
friction = power_rand
power *= parameters.ENGINE_POWER
mass = parameters.MIN_MASS + random.random()*(parameters.MAX_MASS-parameters.MIN_MASS)
turn = parameters.MIN_TURN + random.random()*(parameters.MAX_TURN-parameters.MIN_TURN)
max_fuel = quality - turn
max_fuel = parameters.MIN_FUEL + int(max_fuel*(parameters.MAX_FUEL-parameters.MIN_FUEL))
#
v.tail = vessel.Part(t.triangles, turn, friction, mass)
v.nose = vessel.Part(n.triangles, turn, friction, mass)
v.cockpit = vessel.Part(c.triangles, turn, friction, mass)
v.lwing = vessel.Part(w[0].triangles, turn/2., friction/2., mass/2.)
v.rwing = vessel.Part(w[1].triangles, turn/2., friction/2., mass/2.)
v.engine= vessel.Engine(max_fuel, power)
v.engine.mass = mass
v.engine.turn = turn
v.engine.friction = friction
#
v.refresh_mesh()
v.rotate_around_center_y(-90)
v.compute_box3D()
v.compute_dynamics()
v.from_init_rot = V3()
v.color = rest
#
return v
def init_game(hero):
parameters.players = [gamelogic.Player() for i in range(parameters.NPLAYERS-1)]
hero_color = parameters.HERO_COLOR
hero_player = gamelogic.Player(parameters.HERO_NAME,Material(hero_color))
hero_player.points = 0
parameters.player = hero_player
parameters.players += [hero_player]
if hero is None:
hero = create_vessel(hero_color)
hero.is_hero = True
hero.mass /= 2.
hero.compute_dynamics()
hero.name = "Hero" #!!
hero.attach_to_player(hero_player,reset_color=False)
def init_scene():
## random.seed(0)
#
gara = garage.Garage()
gara.play()
gara.derotate()
#
parameters.scene = Scene()
scene = parameters.scene
scene.cam = Camera(scene.screen, fov=512, d=2, objs=[])
scene.cam.set_aa(True)
#
light_pos = V3(0,1000,-1000)
light_m = V3(20,20,20)
light_M = V3(200,200,200)
light = Light(light_pos, light_m, light_M)
scene.light = light
##hero = hero.get_splitted_copy(threshold=-2.5)
scene.hero = parameters.player.vessel
hero = scene.hero
scene.objs.append(hero)
#track
nx = random.randint(3,4)
ny = random.randint(2,4)
print("nx,ny",nx,ny)
lg = levelgen.LevelGenerator(parameters.ZFINISH,nx,ny)
rw,rh = parameters.RAILW,parameters.RAILH
possible_obstacles = [primitivemeshes.p_rectangle(0.8*rw,0.8*rh,(0,0,255),(0,0,0))]
lg.random_gen(nparts=4,objects=possible_obstacles,min_density=0.1,max_density=0.8)
track = scene.track
for o in track.obstacles:
if random.random() < 0.4:
if random.random() < 0.5:
o.rotation_x = random.randint(2,5)* (2*random.randint(0,1) - 1)
else:
o.rotation_y = random.randint(2,5)* (2*random.randint(0,1) - 1)
o.obj.set_color(Material(parameters.COLOR_ROTATING))
if random.random() < 0.5:
r = random.random()
if r < 0.1:
o.movement_x = 1
elif r < 0.2:
o.movement_y = 1
elif r < 0.25:
o.movement_x = 1
o.movement_y = 1
if o.movement_x or o.movement_y:
o.obj.set_color(Material(parameters.COLOR_MOVING))
#
deco = trackdecorator.Decorator(track,track.zfinish//500) #500
#
finish = primitivemeshes.p_rectangle(track.railw,track.railh,(0,0,0))
## for pos in track.rail_centers():
for x in range(track.nx):
for y in range(track.ny):
pos = V3(track.rails[x,y].middlepos)
pos.z = track.zfinish+5
finish.set_pos(pos)
if x%2 == 0:
if y%2 == 0:
color = (0,0,0)
else:
color = (255,255,255)
else:
if y%2 == 0:
color = (255,255,255)
else:
color = (0,0,0)
finish.set_color(Material(random.choice(color)))
scene.objs.append(finish.get_copy())
scene.track = track
scene.opponents = [create_vessel(random.choice(drawing.colors)) for i in range(2)]
scene.objs += scene.opponents
## fin = Object3D("finish.stl")
## triangles = []
## for t in fin.triangles:
## isok = True
## for v in t.vertices():
## if v.y >= 0:
## isok = False
## if isok:
## triangles.append(t)
## fin = ManualObject3D(triangles)
## fin.rotate_around_center_x(-90)
## fin.scale(30.)
## fin.set_color(Material((255,255,0)))
## fin.move(V3(0,40,track.zfinish))
## track.things_objects.append(fin)
## scene.objs += [fin]
#
scene.refresh_cam()
scene.players = [parameters.player]
near = parameters.player.get_nearest_players()
for i,o in enumerate(scene.opponents):
player = near[i]
scene.put_opponent_on_rail(o,i+1,0,25)
o.attach_to_player(player)
scene.players.append(player)
o.set_ia(100, 0.01)
hero.reinit_orientation()
hero.set_pos(parameters.HERO_POS)
scene.put_hero_on_rail(0,0)
print("end main")
scene.refresh_vessels()
scene.hud.refresh_attributes()
g = gamelogic.ShowRanking("Start list", "Go to race", scene.players)
return scene, g.goback
if __name__ == "__main__":
app = thorpy.Application((parameters.W,parameters.H))
thorpy.set_theme(parameters.THEME)
## thorpy.application.SHOW_FPS = True
screen = thorpy.get_screen()
import dialog
def launch_about():
dialog.launch_blocking_alert("Credits",
"Author: Yann Thorimbert\nLibraries used: Pygame, ThorPy (www.thorpy.org)",
transp=False)
e_bckgr.unblit_and_reblit()
DEBUG = False
def play():
## if not DEBUG:
if True:
name = thorpy.Inserter.make("Choose your name",value="Hero")
box = thorpy.make_ok_box([name])
thorpy.auto_ok(box)
box.center()
## scenario.launch(box)
thorpy.launch_blocking(box,e_bckgr)
parameters.HERO_NAME = name.get_value()
tit = thorpy.make_text("Choose vessel color")
color = thorpy.ColorSetter.make("Choose vessel color")
box = thorpy.make_ok_box([tit,color])
thorpy.auto_ok(box)
box.center()
## scenario.launch(box)
thorpy.launch_blocking(box)
parameters.HERO_COLOR = color.get_value()
print("setting", parameters.HERO_COLOR)
#
vc = gamelogic.ShowRanking("Choose a vessel", "Continue", [], False, True)
vc.derotate()
thorpy.set_theme("classic")
if not DEBUG:
scenario.launch_intro_text()
scenario.launch_intro_text2()
scenario.launch_help()
thorpy.set_theme(parameters.THEME)
init_game(vc.vessels[0])
parameters.AA = vs.get_value("aa")
parameters.VISIBILITY = vs.get_value("visibility")
while True:
parameters.flush()
while True:
scene, goback = init_scene()
if not goback:
break
reac = thorpy.ConstantReaction(thorpy.THORPY_EVENT,scene.func_time,
{"id":thorpy.constants.EVENT_TIME})
g = thorpy.Ghost.make()
parameters.ghost = g
g.add_reaction(reac)
thorpy.functions.playing(30,1000//parameters.FPS)
m = thorpy.Menu(g,fps=parameters.FPS)
m.play()
gamelogic.refresh_ranking()
cat_before,c1 = gamelogic.get_category(parameters.player.ranking-1)
sr = gamelogic.ShowRanking("Ranking", "Go to garage",
scene.get_current_ranking_players(),results=True)
gamelogic.refresh_ranking()
sr.derotate()
cat_after,c2 = gamelogic.get_category(parameters.player.ranking-1)
if c2 > c1:
thorpy.launch_blocking_alert("Your category is now "+cat_after+\
"!\nCongratulations, "+parameters.HERO_NAME+".\nYou earned an extra bonus of 500 $."+\
"\n\nThe track length in this category is 1000m longer.")
parameters.player.money += 500
parameters.ZFINISH += 1000
## parameters.ENGINE_POWER += 0.005
parameters.CURRENT_QUALITY += 0.5
elif c2<c1:
thorpy.launch_blocking_alert("Your category is now "+cat_after+\
"!\nThis is very deceptive, "+parameters.HERO_NAME+".\n\n"+\
"The track length in this category is 1000m shorter.")
parameters.ZFINISH -= 1000
parameters.CURRENT_QUALITY -= 0.5
parameters.flush()
if parameters.player.ranking == parameters.players[0].ranking:
scenario.launch_end()
if parameters.player.vessel.life <=0:
parameters.player.vessel.life = 1
parameters.player.vessel.visible = True
if random.random() < parameters.MERCHANT_PROBABILITY:
garage.buy_part(None)
e_title = thorpy.make_text("The Phantom Racer", 25, (255,0,0))
e_play = thorpy.make_button("Start new game", play)
e_disp,vs = gamelogic.get_display_options()
e_font = thorpy.make_font_options_setter("./metadata", "Font options")
e_about = thorpy.make_button("About", launch_about)
e_quit = thorpy.make_button("Quit", thorpy.functions.quit_menu_func)
elements = [e_title,e_play,e_disp,e_font,e_about,e_quit]
background = thorpy.load_image("PaulinaRiva.png")
background = thorpy.get_resized_image(background,
(parameters.W,parameters.H//2),
type_=max)
e_bckgr = thorpy.Background.make(image=background,elements=elements)
thorpy.store(e_bckgr)
e_title.move((0,-50))
m = thorpy.Menu(e_bckgr)
m.play()
app.quit()
#if other display bugs appear: if len(p) == len(thing.points): in draw...
#be careful:
# cam needs to know all!!!! (for moving objects)
##OverflowError: signed short integer is greater than maximum
# ==> if this keeps happening, do as for Object3D with a check on the absolute values
#check whether refresh() of object 3d would be better off using the GRU version (cf refresh)
| {
"content_hash": "1ea9cd5996d3a88ba828f4f78a19f8ab",
"timestamp": "",
"source": "github",
"line_count": 313,
"max_line_length": 106,
"avg_line_length": 37.108626198083066,
"alnum_prop": 0.5862247094274645,
"repo_name": "YannThorimbert/ThePhantomRacer",
"id": "bd084a7b7433d6abc0395f39525cdeb855c8b155",
"size": "11615",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "main.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "148055"
}
],
"symlink_target": ""
} |
from suds import *
from suds.sax import Namespace, splitPrefix
def qualify(ref, resolvers, defns=Namespace.default):
"""
Get a reference that is I{qualified} by namespace.
@param ref: A referenced schema type name.
@type ref: str
@param resolvers: A list of objects to be used to resolve types.
@type resolvers: [L{sax.element.Element},]
@param defns: An optional target namespace used to qualify references
when no prefix is specified.
@type defns: A default namespace I{tuple: (prefix,uri)} used when ref not prefixed.
@return: A qualified reference.
@rtype: (name, namespace-uri)
"""
ns = None
p, n = splitPrefix(ref)
if p is not None:
if not isinstance(resolvers, (list, tuple)):
resolvers = (resolvers,)
for r in resolvers:
resolved = r.resolvePrefix(p)
if resolved[1] is not None:
ns = resolved
break
if ns is None:
raise Exception('prefix (%s) not resolved' % p)
else:
ns = defns
return (n, ns[1])
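# qualify() illustrative use (made-up values): with no prefix the default
# namespace applies, e.g. qualify('Person', [], defns=('tns', 'http://example.com/ns'))
# returns ('Person', 'http://example.com/ns'); a prefixed ref such as
# 'tns:Person' is resolved against the supplied resolver elements instead.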
def isqref(object):
"""
Get whether the object is a I{qualified reference}.
@param object: An object to be tested.
@type object: I{any}
@rtype: boolean
@see: L{qualify}
"""
return (\
isinstance(object, tuple) and \
len(object) == 2 and \
isinstance(object[0], basestring) and \
isinstance(object[1], basestring))
class Filter:
def __init__(self, inclusive=False, *items):
self.inclusive = inclusive
self.items = items
def __contains__(self, x):
if self.inclusive:
result = ( x in self.items )
else:
result = ( x not in self.items )
return result
| {
"content_hash": "b9907eb81d5d679f7e8cf779a8f8912b",
"timestamp": "",
"source": "github",
"line_count": 58,
"max_line_length": 87,
"avg_line_length": 30.620689655172413,
"alnum_prop": 0.5940315315315315,
"repo_name": "c2theg/DDoS_Information_Sharing",
"id": "c5d801543bcc04b24b908eb1e096bb351bae4458",
"size": "2609",
"binary": false,
"copies": "18",
"ref": "refs/heads/master",
"path": "libraries/suds-jurko-0.6/suds/xsd/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "29713"
},
{
"name": "HTML",
"bytes": "36245"
},
{
"name": "JavaScript",
"bytes": "931"
},
{
"name": "Python",
"bytes": "851500"
},
{
"name": "Shell",
"bytes": "8895"
}
],
"symlink_target": ""
} |
"""
Microservice module
This module contains the microservice code for
server
models
"""
from flask import Flask
# Create the Flask app
app = Flask(__name__)
app.config.from_object('config')
import server
import models | {
"content_hash": "6884e1ba6657c5a9a436041870887d5c",
"timestamp": "",
"source": "github",
"line_count": 15,
"max_line_length": 46,
"avg_line_length": 15.266666666666667,
"alnum_prop": 0.7336244541484717,
"repo_name": "NYU-Foxtrot/inventory",
"id": "b0503da16b533905d6dc3ef85d6c7c50bb0611a4",
"size": "229",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "app/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Gherkin",
"bytes": "4194"
},
{
"name": "HTML",
"bytes": "5030"
},
{
"name": "JavaScript",
"bytes": "11058"
},
{
"name": "Python",
"bytes": "52725"
}
],
"symlink_target": ""
} |
import unittest
from typing import cast
from pyspark.sql.functions import array, explode, col, lit, udf, pandas_udf
from pyspark.sql.types import DoubleType, StructType, StructField, Row
from pyspark.sql.utils import IllegalArgumentException, PythonException
from pyspark.testing.sqlutils import (
ReusedSQLTestCase,
have_pandas,
have_pyarrow,
pandas_requirement_message,
pyarrow_requirement_message,
)
from pyspark.testing.utils import QuietTest
if have_pandas:
import pandas as pd
from pandas.testing import assert_frame_equal
if have_pyarrow:
import pyarrow as pa # noqa: F401
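# These tests cover groupby(...).cogroup(...).applyInPandas(...): two grouped
# DataFrames are co-grouped on their keys and each pair of pandas groups is
# passed to a user function (typically a pd.merge) whose result must match the
# declared output schema.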
@unittest.skipIf(
not have_pandas or not have_pyarrow,
cast(str, pandas_requirement_message or pyarrow_requirement_message),
)
class CogroupedMapInPandasTests(ReusedSQLTestCase):
@property
def data1(self):
return (
self.spark.range(10)
.toDF("id")
.withColumn("ks", array([lit(i) for i in range(20, 30)]))
.withColumn("k", explode(col("ks")))
.withColumn("v", col("k") * 10)
.drop("ks")
)
@property
def data2(self):
return (
self.spark.range(10)
.toDF("id")
.withColumn("ks", array([lit(i) for i in range(20, 30)]))
.withColumn("k", explode(col("ks")))
.withColumn("v2", col("k") * 100)
.drop("ks")
)
def test_simple(self):
self._test_merge(self.data1, self.data2)
def test_left_group_empty(self):
left = self.data1.where(col("id") % 2 == 0)
self._test_merge(left, self.data2)
def test_right_group_empty(self):
right = self.data2.where(col("id") % 2 == 0)
self._test_merge(self.data1, right)
def test_different_schemas(self):
right = self.data2.withColumn("v3", lit("a"))
self._test_merge(self.data1, right, "id long, k int, v int, v2 int, v3 string")
def test_different_keys(self):
left = self.data1
right = self.data2
def merge_pandas(lft, rgt):
return pd.merge(lft.rename(columns={"id2": "id"}), rgt, on=["id", "k"])
result = (
left.withColumnRenamed("id", "id2")
.groupby("id2")
.cogroup(right.groupby("id"))
.applyInPandas(merge_pandas, "id long, k int, v int, v2 int")
.sort(["id", "k"])
.toPandas()
)
left = left.toPandas()
right = right.toPandas()
expected = pd.merge(left, right, on=["id", "k"]).sort_values(by=["id", "k"])
assert_frame_equal(expected, result)
def test_complex_group_by(self):
left = pd.DataFrame.from_dict({"id": [1, 2, 3], "k": [5, 6, 7], "v": [9, 10, 11]})
right = pd.DataFrame.from_dict({"id": [11, 12, 13], "k": [5, 6, 7], "v2": [90, 100, 110]})
left_gdf = self.spark.createDataFrame(left).groupby(col("id") % 2 == 0)
right_gdf = self.spark.createDataFrame(right).groupby(col("id") % 2 == 0)
def merge_pandas(lft, rgt):
return pd.merge(lft[["k", "v"]], rgt[["k", "v2"]], on=["k"])
result = (
left_gdf.cogroup(right_gdf)
.applyInPandas(merge_pandas, "k long, v long, v2 long")
.sort(["k"])
.toPandas()
)
expected = pd.DataFrame.from_dict({"k": [5, 6, 7], "v": [9, 10, 11], "v2": [90, 100, 110]})
assert_frame_equal(expected, result)
def test_empty_group_by(self):
left = self.data1
right = self.data2
def merge_pandas(lft, rgt):
return pd.merge(lft, rgt, on=["id", "k"])
result = (
left.groupby()
.cogroup(right.groupby())
.applyInPandas(merge_pandas, "id long, k int, v int, v2 int")
.sort(["id", "k"])
.toPandas()
)
left = left.toPandas()
right = right.toPandas()
expected = pd.merge(left, right, on=["id", "k"]).sort_values(by=["id", "k"])
assert_frame_equal(expected, result)
def test_different_group_key_cardinality(self):
left = self.data1
right = self.data2
def merge_pandas(lft, _):
return lft
with QuietTest(self.sc):
with self.assertRaisesRegex(
IllegalArgumentException,
"requirement failed: Cogroup keys must have same size: 2 != 1",
):
(left.groupby("id", "k").cogroup(right.groupby("id"))).applyInPandas(
merge_pandas, "id long, k int, v int"
)
def test_apply_in_pandas_not_returning_pandas_dataframe(self):
left = self.data1
right = self.data2
def merge_pandas(lft, rgt):
return lft.size + rgt.size
with QuietTest(self.sc):
with self.assertRaisesRegex(
PythonException,
"Return type of the user-defined function should be pandas.DataFrame, "
"but is <class 'numpy.int64'>",
):
(
left.groupby("id")
.cogroup(right.groupby("id"))
.applyInPandas(merge_pandas, "id long, k int, v int, v2 int")
.collect()
)
def test_apply_in_pandas_returning_wrong_number_of_columns(self):
left = self.data1
right = self.data2
def merge_pandas(lft, rgt):
if 0 in lft["id"] and lft["id"][0] % 2 == 0:
lft["add"] = 0
if 0 in rgt["id"] and rgt["id"][0] % 3 == 0:
rgt["more"] = 1
return pd.merge(lft, rgt, on=["id", "k"])
with QuietTest(self.sc):
with self.assertRaisesRegex(
PythonException,
"Number of columns of the returned pandas.DataFrame "
"doesn't match specified schema. Expected: 4 Actual: 6",
):
(
# merge_pandas returns two columns for even keys while we set schema to four
left.groupby("id")
.cogroup(right.groupby("id"))
.applyInPandas(merge_pandas, "id long, k int, v int, v2 int")
.collect()
)
def test_apply_in_pandas_returning_empty_dataframe(self):
left = self.data1
right = self.data2
def merge_pandas(lft, rgt):
if 0 in lft["id"] and lft["id"][0] % 2 == 0:
return pd.DataFrame([])
if 0 in rgt["id"] and rgt["id"][0] % 3 == 0:
return pd.DataFrame([])
return pd.merge(lft, rgt, on=["id", "k"])
result = (
left.groupby("id")
.cogroup(right.groupby("id"))
.applyInPandas(merge_pandas, "id long, k int, v int, v2 int")
.sort(["id", "k"])
.toPandas()
)
left = left.toPandas()
right = right.toPandas()
expected = pd.merge(
left[left["id"] % 2 != 0], right[right["id"] % 3 != 0], on=["id", "k"]
).sort_values(by=["id", "k"])
assert_frame_equal(expected, result)
def test_apply_in_pandas_returning_empty_dataframe_and_wrong_number_of_columns(self):
left = self.data1
right = self.data2
def merge_pandas(lft, rgt):
if 0 in lft["id"] and lft["id"][0] % 2 == 0:
return pd.DataFrame([], columns=["id", "k"])
return pd.merge(lft, rgt, on=["id", "k"])
with QuietTest(self.sc):
with self.assertRaisesRegex(
PythonException,
"Number of columns of the returned pandas.DataFrame doesn't "
"match specified schema. Expected: 4 Actual: 2",
):
(
# merge_pandas returns two columns for even keys while we set schema to four
left.groupby("id")
.cogroup(right.groupby("id"))
.applyInPandas(merge_pandas, "id long, k int, v int, v2 int")
.collect()
)
def test_mixed_scalar_udfs_followed_by_cogrouby_apply(self):
df = self.spark.range(0, 10).toDF("v1")
df = df.withColumn("v2", udf(lambda x: x + 1, "int")(df["v1"])).withColumn(
"v3", pandas_udf(lambda x: x + 2, "int")(df["v1"])
)
result = (
df.groupby()
.cogroup(df.groupby())
.applyInPandas(
lambda x, y: pd.DataFrame([(x.sum().sum(), y.sum().sum())]), "sum1 int, sum2 int"
)
.collect()
)
self.assertEqual(result[0]["sum1"], 165)
self.assertEqual(result[0]["sum2"], 165)
def test_with_key_left(self):
self._test_with_key(self.data1, self.data1, isLeft=True)
def test_with_key_right(self):
self._test_with_key(self.data1, self.data1, isLeft=False)
def test_with_key_left_group_empty(self):
left = self.data1.where(col("id") % 2 == 0)
self._test_with_key(left, self.data1, isLeft=True)
def test_with_key_right_group_empty(self):
right = self.data1.where(col("id") % 2 == 0)
self._test_with_key(self.data1, right, isLeft=False)
def test_with_key_complex(self):
def left_assign_key(key, lft, _):
return lft.assign(key=key[0])
result = (
self.data1.groupby(col("id") % 2 == 0)
.cogroup(self.data2.groupby(col("id") % 2 == 0))
.applyInPandas(left_assign_key, "id long, k int, v int, key boolean")
.sort(["id", "k"])
.toPandas()
)
expected = self.data1.toPandas()
expected = expected.assign(key=expected.id % 2 == 0)
assert_frame_equal(expected, result)
def test_wrong_return_type(self):
# Test that we get a sensible exception invalid values passed to apply
left = self.data1
right = self.data2
with QuietTest(self.sc):
with self.assertRaisesRegex(
NotImplementedError, "Invalid return type.*ArrayType.*TimestampType"
):
left.groupby("id").cogroup(right.groupby("id")).applyInPandas(
lambda l, r: l, "id long, v array<timestamp>"
)
def test_wrong_args(self):
left = self.data1
right = self.data2
with self.assertRaisesRegex(ValueError, "Invalid function"):
left.groupby("id").cogroup(right.groupby("id")).applyInPandas(
lambda: 1, StructType([StructField("d", DoubleType())])
)
def test_case_insensitive_grouping_column(self):
# SPARK-31915: case-insensitive grouping column should work.
df1 = self.spark.createDataFrame([(1, 1)], ("column", "value"))
row = (
df1.groupby("ColUmn")
.cogroup(df1.groupby("COLUMN"))
.applyInPandas(lambda r, l: r + l, "column long, value long")
.first()
)
self.assertEqual(row.asDict(), Row(column=2, value=2).asDict())
df2 = self.spark.createDataFrame([(1, 1)], ("column", "value"))
row = (
df1.groupby("ColUmn")
.cogroup(df2.groupby("COLUMN"))
.applyInPandas(lambda r, l: r + l, "column long, value long")
.first()
)
self.assertEqual(row.asDict(), Row(column=2, value=2).asDict())
def test_self_join(self):
# SPARK-34319: self-join with FlatMapCoGroupsInPandas
df = self.spark.createDataFrame([(1, 1)], ("column", "value"))
row = (
df.groupby("ColUmn")
.cogroup(df.groupby("COLUMN"))
.applyInPandas(lambda r, l: r + l, "column long, value long")
)
row = row.join(row).first()
self.assertEqual(row.asDict(), Row(column=2, value=2).asDict())
@staticmethod
def _test_with_key(left, right, isLeft):
def right_assign_key(key, lft, rgt):
return lft.assign(key=key[0]) if isLeft else rgt.assign(key=key[0])
result = (
left.groupby("id")
.cogroup(right.groupby("id"))
.applyInPandas(right_assign_key, "id long, k int, v int, key long")
.toPandas()
)
expected = left.toPandas() if isLeft else right.toPandas()
expected = expected.assign(key=expected.id)
assert_frame_equal(expected, result)
@staticmethod
def _test_merge(left, right, output_schema="id long, k int, v int, v2 int"):
def merge_pandas(lft, rgt):
return pd.merge(lft, rgt, on=["id", "k"])
result = (
left.groupby("id")
.cogroup(right.groupby("id"))
.applyInPandas(merge_pandas, output_schema)
.sort(["id", "k"])
.toPandas()
)
left = left.toPandas()
right = right.toPandas()
expected = pd.merge(left, right, on=["id", "k"]).sort_values(by=["id", "k"])
assert_frame_equal(expected, result)
if __name__ == "__main__":
from pyspark.sql.tests.pandas.test_pandas_cogrouped_map import * # noqa: F401
try:
import xmlrunner # type: ignore[import]
testRunner = xmlrunner.XMLTestRunner(output="target/test-reports", verbosity=2)
except ImportError:
testRunner = None
unittest.main(testRunner=testRunner, verbosity=2)
| {
"content_hash": "5adf8d431e93bd3b35a7bc16e30633aa",
"timestamp": "",
"source": "github",
"line_count": 398,
"max_line_length": 99,
"avg_line_length": 33.7964824120603,
"alnum_prop": 0.5339379971749312,
"repo_name": "srowen/spark",
"id": "b3f4c7331d162b4686c232ef3961a2b199c03603",
"size": "14236",
"binary": false,
"copies": "12",
"ref": "refs/heads/master",
"path": "python/pyspark/sql/tests/pandas/test_pandas_cogrouped_map.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "ANTLR",
"bytes": "61530"
},
{
"name": "Batchfile",
"bytes": "27482"
},
{
"name": "C",
"bytes": "1493"
},
{
"name": "CSS",
"bytes": "26338"
},
{
"name": "Dockerfile",
"bytes": "16279"
},
{
"name": "HTML",
"bytes": "42080"
},
{
"name": "HiveQL",
"bytes": "1859465"
},
{
"name": "Java",
"bytes": "4753582"
},
{
"name": "JavaScript",
"bytes": "223014"
},
{
"name": "Jupyter Notebook",
"bytes": "4310512"
},
{
"name": "Makefile",
"bytes": "2379"
},
{
"name": "PLpgSQL",
"bytes": "352609"
},
{
"name": "PowerShell",
"bytes": "4221"
},
{
"name": "Python",
"bytes": "8550026"
},
{
"name": "R",
"bytes": "1287477"
},
{
"name": "ReScript",
"bytes": "240"
},
{
"name": "Roff",
"bytes": "32470"
},
{
"name": "Scala",
"bytes": "44596827"
},
{
"name": "Shell",
"bytes": "245400"
},
{
"name": "Thrift",
"bytes": "2016"
},
{
"name": "q",
"bytes": "111129"
}
],
"symlink_target": ""
} |
import json
from jsonapp.utils.json_util import JSONUtil
class String(object):
def __init__(self, string):
self._string = string
@property
def string(self):
"""
:rtype: str
"""
return self._string
def set(self, value):
self._string = value
def to_dict(self):
return json.loads(self.string)
def to_pretty_json_string(self):
return json.dumps(self.to_odict(), indent=4)
def to_odict(self):
json_object = json.loads(self.string)
if type(json_object) == dict:
json_odict = JSONUtil(self.string).to_odict()
return json_odict
else:
return json_object
@property
def lines(self):
return self.string.split('\n')
@property
def first_line(self):
return self.lines[0]
@property
def without_whitespace(self):
import re
        updated_string = re.sub(r'\s+', '', self.string)
return updated_string
def remove_first_line(self):
updated_string = '\n'.join(self.lines[1:])
self.set(updated_string)
def to_number_with_commas(self):
number_with_commas = "{:,}".format(self.string)
return number_with_commas
@property
def is_url(self):
from urlparse import urlparse
try:
result = urlparse(self.string)
if result.scheme == '':
return False
            # A non-empty list is always truthy; check each component instead.
            result = all([result.scheme, result.netloc, result.path])
except ValueError:
result = False
return result
def multi_command_template(command_template, separator=False, continue_on_error=False):
if separator:
separator_string = "echo; echo '-----------------------------------'; echo"
replacement = ' &&\n' + separator_string + '\n'
result = command_template.strip().replace('\n', replacement)
else:
if continue_on_error:
result = command_template.strip().replace('\n', ' ;\n')
else:
result = command_template.strip().replace('\n', ' &&\n')
return result
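# multi_command_template example: multi_command_template("cd /tmp\nmake")
# returns "cd /tmp &&\nmake"; with separator=True an echo'd divider line is
# chained between the commands, and continue_on_error=True joins them with ';'
# instead of '&&'.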
multiple_commands_template = multi_command_template
def text_with_space(text):
result = '\n{text}\n'.format(text=text)
return result
def text_with_bottom_space(text):
result = '{text}\n'.format(text=text)
return result
def print_with_space(text):
print(text_with_space(text))
def print_with_bottom_space(text):
print(text_with_bottom_space(text))
def print_command(command):
print_with_space('Command:\n{command}'.format(command=command))
def print_not_implemented():
print('Not implemented.')
def print_lines(string_list):
for string in string_list:
print(string)
| {
"content_hash": "5471de8b289feddc531b8cb597334617",
"timestamp": "",
"source": "github",
"line_count": 145,
"max_line_length": 87,
"avg_line_length": 19.24137931034483,
"alnum_prop": 0.5849462365591398,
"repo_name": "eyalev/jsonapp",
"id": "1e5c500928c8ef167eb490cdf6d3e76117aabe4f",
"size": "2790",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "jsonapp/utils/string_util.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "2274"
},
{
"name": "Python",
"bytes": "20816"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from mock import call, patch
import pytest
from boxsdk.config import API
from boxsdk.object.group_membership import GroupMembership
from boxsdk.util.shared_link import get_shared_link_header
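# Integration tests: using with_shared_link(...) on the client, on an object,
# or on an endpoint must add the BoxApi shared-link header (alongside the
# usual Authorization header) to every outgoing request.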
@pytest.fixture
def shared_link():
return 'https://app.box.com/s/q2i1024dvguiads6mzj2avsq9hmz43du'
@pytest.fixture(params=(None, 'shared_link_password'))
def shared_link_password(request):
return request.param
@pytest.fixture
def box_api_headers(shared_link, shared_link_password, access_token):
# pylint:disable=redefined-outer-name
box_api_header = get_shared_link_header(shared_link, shared_link_password)['BoxApi']
return {'Authorization': 'Bearer {0}'.format(access_token), 'BoxApi': box_api_header}
def test_client_with_shared_link_causes_box_api_header_to_be_added(
box_client,
mock_box_network,
generic_successful_response,
shared_link,
shared_link_password,
box_api_headers,
):
# pylint:disable=redefined-outer-name
mock_box_network.session.request.side_effect = [
generic_successful_response,
]
box_client.with_shared_link(shared_link, shared_link_password).folder('0').get()
assert mock_box_network.session.request.mock_calls == [
call(
'GET',
'{0}/folders/0'.format(API.BASE_API_URL),
headers=box_api_headers,
params=None,
),
]
def test_folder_object_with_shared_link_causes_box_api_header_to_be_added(
box_client,
mock_box_network,
generic_successful_response,
shared_link,
shared_link_password,
box_api_headers,
):
# pylint:disable=redefined-outer-name
mock_box_network.session.request.side_effect = [
generic_successful_response,
]
box_client.folder('0').with_shared_link(shared_link, shared_link_password).get()
assert mock_box_network.session.request.mock_calls == [
call(
'GET',
'{0}/folders/0'.format(API.BASE_API_URL),
headers=box_api_headers,
params=None,
),
]
def test_group_membership_object_with_shared_link_causes_box_api_header_to_be_added(
box_client,
mock_box_network,
generic_successful_response,
shared_link,
shared_link_password,
box_api_headers,
):
# pylint:disable=redefined-outer-name
mock_box_network.session.request.side_effect = [
generic_successful_response,
]
with patch.object(GroupMembership, '_init_user_and_group_instances') as init:
init.return_value = None, None
box_client.group_membership('0').with_shared_link(shared_link, shared_link_password).get()
assert mock_box_network.session.request.mock_calls == [
call(
'GET',
'{0}/group_memberships/0'.format(API.BASE_API_URL),
headers=box_api_headers,
params=None,
),
]
def test_events_endpoint_with_shared_link_causes_box_api_header_to_be_added(
box_client,
mock_box_network,
generic_successful_response,
shared_link,
shared_link_password,
box_api_headers,
):
# pylint:disable=redefined-outer-name
mock_box_network.session.request.side_effect = [
generic_successful_response,
]
stream_position = 1348790499819
options = {'url': '{0}/events'.format(API.BASE_API_URL), 'retry_timeout': 60}
box_client.events().with_shared_link(shared_link, shared_link_password).long_poll(options, stream_position)
assert mock_box_network.session.request.mock_calls == [
call(
'GET',
options['url'],
headers=box_api_headers,
timeout=options['retry_timeout'],
params={'stream_position': stream_position},
),
]
def test_metadata_endpoint_with_shared_link_causes_box_api_header_to_be_added(
box_client,
mock_box_network,
generic_successful_response,
shared_link,
shared_link_password,
box_api_headers,
):
# pylint:disable=redefined-outer-name
mock_box_network.session.request.side_effect = [
generic_successful_response,
]
box_client.file('0').metadata().with_shared_link(shared_link, shared_link_password).get()
assert mock_box_network.session.request.mock_calls == [
call(
'GET',
'{0}/files/0/metadata/global/properties'.format(API.BASE_API_URL),
headers=box_api_headers,
),
]
| {
"content_hash": "663b869a06098cc70bfb8dafb25c4b2d",
"timestamp": "",
"source": "github",
"line_count": 142,
"max_line_length": 111,
"avg_line_length": 32.12676056338028,
"alnum_prop": 0.637001315212626,
"repo_name": "Tusky/box-python-sdk",
"id": "5fe63335829bd8e6e8b370d5c70b44065d01c834",
"size": "4579",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "test/integration/test_with_shared_link.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "348572"
},
{
"name": "Smarty",
"bytes": "527"
}
],
"symlink_target": ""
} |
__author__ = 'hwang'
import pandas as pd
import numpy as np
import random as rd
from pyspark import SparkContext, SparkConf
import time
def tick_list(file):
symbols = pd.read_csv(file,sep='\t', header=None)
ls_symbols = symbols[0]
ls_symbols = ls_symbols.tolist()
return ls_symbols
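# baseRoutine1: for each ticker, load the 5-minute close prices plus every 5th
# daily close from HDFS, turn them into aligned price/return series of equal
# length, and hand them to getVaR() with a flat position of 100 per ticker.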
def baseRoutine1(ticks, sc):
# routine for deleting tick which didn't change here
allTick = ticks # all tickers requested
someTick = ticks # after checking, tickers which need to read again
# 1. reading file again to Close dictionary containig RDD of close price
Close_FM_rdd_dict={}# dict RDD: storing all 5 min close price of someTick as in float
Close_daily_rdd_dict={} # dict RDD: storing all close 1 min price of tickers as in float
Close_rdd={} # dict RDD: storing all close price of someTick as in unicode
    Close_float_rdd={} # dict RDD: storing all close price of someTick as in float
    Close={} # dict when creating dataframe for input of getVaR()
    ret_float_rdd={} # dict RDD: storing all ret of target someTick as in float
    ret_dict={} # dict : storing all ret of target someTick
lenOfret_ls=[]
#for t in ticks:
# fileLoc = "hdfs://master:8020/user/hwang/data/" + t
# Close_rdd[t] = sc.textFile(fileLoc).cache()
for t in ticks:
fileLoc = "hdfs://master:8020/user/hwang/data/" + t
# create rdd which contains list of prices in float type
Close_FM_rdd_dict[t] = sc.textFile(fileLoc).cache()
# Collect daily data, slice it to 5 min data then combine it to Close_rdd_dict[t] RDD.
fileLoc = "hdfs://master:8020/user/hwang/dailyData/" + t
Close_daily_rdd_dict[t] = sc.textFile(fileLoc).cache()
Close_daily_rdd_dict[t] = sc.parallelize(Close_daily_rdd_dict[t].collect()[::5])
Close_rdd[t] = Close_FM_rdd_dict[t] + Close_daily_rdd_dict[t]
# 2. create list of price per tick
price=[] # price
for x in Close_rdd[t].take(Close_rdd[t].count()):
price.append(float(x))
price_arr = np.array(price)
price_ls = price_arr.tolist()
ret_arr = (price_arr[1:] - price_arr[:-1])/price_arr[:-1]
ret_ls = ret_arr.tolist()
# 3. create new rdd from list above
Close_float_rdd[t] = sc.parallelize(price_ls)
#Close[t] = price[-500:]
Close[t] = price
ret_float_rdd[t] = sc.parallelize(ret_ls)
#ret_dict[t] = ret_ls[-500:]
ret_dict[t] = ret_ls
lenOfret_ls.append(len(ret_ls))
# end of for
# 4. create inputs for getVar function
# allTick: price, positions_ls, ret_target_df
n = min(lenOfret_ls)
ret_dict_n={} # storing same number of returns of each ticker
Close_n={} # storing same number of price of each ticker
for t in ticks:
ret_dict_n[t]= ret_dict[t][-n:]
Close_n[t] = Close[t][-n:]
ret_target_df =pd.DataFrame.from_dict(ret_dict_n)
#ret_target_df = pd.DataFrame.from_dict(ret_dict)
price_df = pd.DataFrame(Close_n)
positions_ls= (np.ones(len(ticks))*100).tolist()
[a, b] = getVaR(ticks, price_df, positions_ls, ret_target_df, 10000)
return [a, b]
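# getVaR: Monte Carlo VaR. Simulate Gaussian returns per ticker from the sample
# mean/std of the observed returns, scale by the current exposure
# (position * last price), take the 5% quantile of the simulated P&L per asset,
# then combine the per-asset VaRs through the empirical correlation matrix:
# VaR95 = sqrt(v' * C * v).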
def getVaR(ticker, price, positions_ls, ret_target_df, no_simul):
i=0
exposures = []
for t in (ticker):
exposures.append(positions_ls[i]*price[t][len(price[t])-1])
i +=1
all_MC_df = pd.DataFrame(index=range(no_simul), columns=ticker)
MC_port = []
for i in range(no_simul):
ret_MC = []
# MC return for each assets
for t in ticker:
ret_MC.append(np.mean(ret_target_df[t]) + np.std(ret_target_df[t]) * rd.gauss(0,1))
# Compute the exposure * return for each asset
MC_each_ls = []
for k in range(len(ticker)):
MC_each_ls.append(exposures[k]*ret_MC[k])
all_MC_df.loc[i] = MC_each_ls
# Sum --> total portfolio value at time i
MC_port.append(np.sum(all_MC_df.loc[i]))
temp = ret_target_df.corr()
corrM = np.matrix(temp)
all_sorted_MC_df = pd.DataFrame(pd.concat([all_MC_df[col].order().reset_index(drop=True) for col in all_MC_df], axis=1, ignore_index=True))
all_sorted_MC_df.columns = ticker
temp_ls = []
for t in ticker:
temp_ls.append(-all_sorted_MC_df[t].values[no_simul/20])
temp_var = np.matrix(np.transpose(np.array(temp_ls)))
VaR_each = pd.DataFrame(temp_var, index=range(1), columns=ticker)
temp_ls = np.array(temp_ls)
MC_mat = np.matrix(temp_ls)
VaR95 = np.sqrt(MC_mat*corrM*np.transpose(MC_mat))
VaR95 = VaR95.tolist()
VaR95 = VaR95[0]
return VaR_each, VaR95
appName ='Routine5-01'
conf = SparkConf().setAppName(appName).setMaster('spark://master:7077').set("spark.executor.memory", "1g")
sc = SparkContext(conf=conf)
sym = tick_list("./sp500")
sym100 = sym[-100:]
start_time = time.time()
#[a, b] = baseRoutine1(['AA', 'AAPL'], sc)
#[VaR, VaR_Total, Close_float_rdd, ret_float_rdd] = baseRoutine1(sym, sc)
[a, b] = baseRoutine1(sym100, sc)
print a
print b
end_time= time.time()
print('Duration: {}'.format(end_time - start_time))
| {
"content_hash": "0b393e51094ae7ea5773e1dd712c644d",
"timestamp": "",
"source": "github",
"line_count": 139,
"max_line_length": 143,
"avg_line_length": 36.89208633093525,
"alnum_prop": 0.625780031201248,
"repo_name": "Sapphirine/Real-time-Risk-Management-System",
"id": "b69bbda3072b1fd4287e4de53a769df8fb9692f9",
"size": "5128",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "GetVar_least_spark.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "1"
},
{
"name": "C++",
"bytes": "1"
},
{
"name": "Java",
"bytes": "5007"
},
{
"name": "JavaScript",
"bytes": "238700"
},
{
"name": "Python",
"bytes": "10530"
}
],
"symlink_target": ""
} |
from django.conf import settings
from django.template.defaultfilters import floatformat # noqa
from django.utils import translation
from django.utils.translation import ugettext_lazy as _
from horizon import exceptions
from horizon.utils import csvbase
from openstack_dashboard import api
from openstack_dashboard import usage
from horizon import messages
from horizon import API
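# Billing additions: when METERING_ENABLED is set, the CSV export below gains
# per-project vCPU/memory/disk cost columns and the overview view lets admins
# read and update the per-hour prices through the custom horizon API helper.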
class GlobalUsageCsvRenderer(csvbase.BaseCsvResponse):
if getattr(settings, 'METERING_ENABLED', False):
columns = [_("Project Name"), _("VCPUs"), _("RAM (MB)"),
_("Disk (GB)"), _("Usage (Hours)"), _("VCPU Costs"), _("Memory Costs"), _("Disk Costs")]
else:
columns = [_("Project Name"), _("VCPUs"), _("RAM (MB)"),
_("Disk (GB)"), _("Usage (Hours)")]
def get_row_data(self):
if getattr(settings, 'METERING_ENABLED', False):
for u in self.context['usage'].usage_list:
yield (u.project_name or u.tenant_id,
u.vcpus,
u.memory_mb,
u.local_gb,
floatformat(u.vcpu_hours, 2),
floatformat(u.vcpu_costs, 2),
floatformat(u.memory_costs, 2),
floatformat(u.disk_costs,2))
else:
for u in self.context['usage'].usage_list:
yield (u.project_name or u.tenant_id,
u.vcpus,
u.memory_mb,
u.local_gb,
floatformat(u.vcpu_hours, 2))
class GlobalOverview(usage.UsageView):
table_class = usage.GlobalUsageTable
usage_class = usage.GlobalUsage
template_name = 'admin/overview/usage.html'
csv_response_class = GlobalUsageCsvRenderer
def get_context_data(self, **kwargs):
context = super(GlobalOverview, self).get_context_data(**kwargs)
context['monitoring'] = getattr(settings, 'EXTERNAL_MONITORING', [])
request = self.request
if getattr(settings, 'METERING_ENABLED', False):
ifUpdatePrice = request.GET.get("update_price_name")
prices = API.getPrice()
vcpu_per_hour_price = prices[0]
memory_per_hour_price = prices[1]
disk_per_hour_price = prices[2]
currency = prices[3]
if ifUpdatePrice == 'update':
vcpu_per_hour_price = request.GET.get("vcpu_per_hour_price")
memory_per_hour_price = request.GET.get("memory_per_hour_price")
disk_per_hour_price = request.GET.get("disk_per_hour_price")
currency = request.GET.get("currencyName")
isNum = True
try:
float(vcpu_per_hour_price)
float(memory_per_hour_price)
float(disk_per_hour_price)
except(ValueError):
isNum = False
if isNum:
flag = API.updatePrice(vcpu_per_hour_price,memory_per_hour_price,disk_per_hour_price,currency)
if flag:
messages.success(request,_("Update successfully!"))
else:
vcpu_per_hour_price = prices[0]
memory_per_hour_price = prices[1]
disk_per_hour_price = prices[2]
currency = prices[3]
messages.error(request,_("Update failed!"))
else:
vcpu_per_hour_price = prices[0]
memory_per_hour_price = prices[1]
disk_per_hour_price = prices[2]
currency = prices[3]
messages.error(request,_("Please input correct value!"))
context['vcpu_per_hour_price'] = vcpu_per_hour_price
context['memory_per_hour_price'] = memory_per_hour_price
context['disk_per_hour_price'] = disk_per_hour_price
context['currency'] = currency
context['METERING_ENABLED'] = getattr(settings,
'METERING_ENABLED',
False)
return context
def get_data(self):
data = super(GlobalOverview, self).get_data()
# Pre-fill project names
try:
projects, has_more = api.keystone.tenant_list(self.request)
except Exception:
projects = []
exceptions.handle(self.request,
_('Unable to retrieve project list.'))
for instance in data:
project = filter(lambda t: t.id == instance.tenant_id, projects)
# If we could not get the project name, show the tenant_id with
# a 'Deleted' identifier instead.
if project:
instance.project_name = getattr(project[0], "name", None)
else:
deleted = _("Deleted")
instance.project_name = translation.string_concat(
instance.tenant_id, " (", deleted, ")")
return data
| {
"content_hash": "2f66f4e547a8ceb34578e7a6670c401a",
"timestamp": "",
"source": "github",
"line_count": 126,
"max_line_length": 114,
"avg_line_length": 40.76984126984127,
"alnum_prop": 0.5298812536499903,
"repo_name": "wangxiangyu/horizon",
"id": "07b0015e81766437ebf653029bdcbd2395e41252",
"size": "5901",
"binary": false,
"copies": "2",
"ref": "refs/heads/stable/kilo",
"path": "openstack_dashboard/dashboards/admin/overview/views.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "396254"
},
{
"name": "HTML",
"bytes": "501148"
},
{
"name": "JavaScript",
"bytes": "560576"
},
{
"name": "Makefile",
"bytes": "588"
},
{
"name": "Python",
"bytes": "4657152"
},
{
"name": "Shell",
"bytes": "18299"
}
],
"symlink_target": ""
} |
from setuptools import setup
from spanner import __version__
setup(
name='spanner',
version=__version__,
description='An accumulation of utilities / convenience functions for python',
author='Bryan Johnson',
author_email='[email protected]',
packages=['spanner'],
url='https://github.com/dbjohnson/python-utils',
download_url='https://github.com/dbjohnson/spanner/tarball/%s' % __version__,
install_requires=['xlrd>=0.9.3', 'xlwt>=0.7.5']
)
| {
"content_hash": "4832b38a65018439b3bb88809471b36f",
"timestamp": "",
"source": "github",
"line_count": 16,
"max_line_length": 82,
"avg_line_length": 31.5,
"alnum_prop": 0.6567460317460317,
"repo_name": "dbjohnson/spanner",
"id": "7a7b9a65d01b23ba0b379382468b3e4967730ae8",
"size": "527",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "28631"
}
],
"symlink_target": ""
} |
"""Tests for tensorflow.contrib.graph_editor."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib import graph_editor as ge
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import ops
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import test
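# Fixture graph used below: c = a + b under scope "foo"; e = c+d, f = c+d,
# g = c+a and h = f+g live under "foo/bar", with h additionally
# control-depending on c.op.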
class SubgraphTest(test.TestCase):
def setUp(self):
self.graph = ops.Graph()
with self.graph.as_default():
self.a = constant_op.constant([1., 1.], shape=[2], name="a")
with ops.name_scope("foo"):
self.b = constant_op.constant([2., 2.], shape=[2], name="b")
self.c = math_ops.add(self.a, self.b, name="c")
self.d = constant_op.constant([3., 3.], shape=[2], name="d")
with ops.name_scope("bar"):
self.e = math_ops.add(self.c, self.d, name="e")
self.f = math_ops.add(self.c, self.d, name="f")
self.g = math_ops.add(self.c, self.a, name="g")
with ops.control_dependencies([self.c.op]):
self.h = math_ops.add(self.f, self.g, name="h")
def test_subgraph(self):
sgv = ge.sgv(self.graph)
self.assertEqual(list(sgv.outputs), [self.e, self.h])
self.assertEqual(list(sgv.inputs), [])
self.assertEqual(len(sgv.ops), 8)
sgv = ge.sgv(self.f.op, self.g.op)
self.assertEqual(list(sgv.outputs), [self.f, self.g])
self.assertEqual(list(sgv.inputs), [self.c, self.d, self.a])
sgv = ge.sgv_scope("foo/bar", graph=self.graph)
self.assertEqual(
list(sgv.ops), [self.e.op, self.f.op, self.g.op, self.h.op])
def test_subgraph_remap(self):
sgv = ge.sgv(self.c.op)
self.assertEqual(list(sgv.outputs), [self.c])
self.assertEqual(list(sgv.inputs), [self.a, self.b])
sgv = sgv.remap_outputs_to_consumers()
self.assertEqual(list(sgv.outputs), [self.c, self.c, self.c])
sgv = sgv.remap_outputs_make_unique()
self.assertEqual(list(sgv.outputs), [self.c])
sgv = sgv.remap(new_input_indices=[], new_output_indices=[])
self.assertEqual(len(sgv.inputs), 0)
self.assertEqual(len(sgv.outputs), 0)
sgv = sgv.remap_default()
self.assertEqual(list(sgv.outputs), [self.c])
self.assertEqual(list(sgv.inputs), [self.a, self.b])
def test_remove_unused_ops(self):
sgv = ge.sgv(self.graph)
self.assertEqual(list(sgv.outputs), [self.e, self.h])
self.assertEqual(len(sgv.ops), 8)
sgv = sgv.remap_outputs(new_output_indices=[1]).remove_unused_ops()
self.assertEqual(list(sgv.outputs), [self.h])
self.assertEqual(len(sgv.ops), 7)
if __name__ == "__main__":
test.main()
| {
"content_hash": "4d5b5c62ad5857e88d63323b7b8a6e5c",
"timestamp": "",
"source": "github",
"line_count": 72,
"max_line_length": 71,
"avg_line_length": 36.90277777777778,
"alnum_prop": 0.6450884456153556,
"repo_name": "krikru/tensorflow-opencl",
"id": "e98cce0b026b13ceb176ddfcd411479211c1de98",
"size": "3346",
"binary": false,
"copies": "7",
"ref": "refs/heads/master",
"path": "tensorflow/contrib/graph_editor/tests/subgraph_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "6963"
},
{
"name": "C",
"bytes": "128013"
},
{
"name": "C++",
"bytes": "20161163"
},
{
"name": "CMake",
"bytes": "112204"
},
{
"name": "CSS",
"bytes": "774"
},
{
"name": "Go",
"bytes": "96872"
},
{
"name": "HTML",
"bytes": "534896"
},
{
"name": "Java",
"bytes": "215285"
},
{
"name": "JavaScript",
"bytes": "13406"
},
{
"name": "Jupyter Notebook",
"bytes": "1833593"
},
{
"name": "LLVM",
"bytes": "6536"
},
{
"name": "Makefile",
"bytes": "29661"
},
{
"name": "Objective-C",
"bytes": "7056"
},
{
"name": "Objective-C++",
"bytes": "64592"
},
{
"name": "Protocol Buffer",
"bytes": "183901"
},
{
"name": "Python",
"bytes": "16291238"
},
{
"name": "Shell",
"bytes": "314152"
},
{
"name": "TypeScript",
"bytes": "761620"
}
],
"symlink_target": ""
} |
"""
Test scalar buffer interface adheres to PEP 3118
"""
import numpy as np
from numpy.core._rational_tests import rational
from numpy.core._multiarray_tests import get_buffer_info
import pytest
from numpy.testing import assert_, assert_equal, assert_raises
# PEP3118 format strings for native (standard alignment and byteorder) types
scalars_and_codes = [
(np.bool_, '?'),
(np.byte, 'b'),
(np.short, 'h'),
(np.intc, 'i'),
(np.int_, 'l'),
(np.longlong, 'q'),
(np.ubyte, 'B'),
(np.ushort, 'H'),
(np.uintc, 'I'),
(np.uint, 'L'),
(np.ulonglong, 'Q'),
(np.half, 'e'),
(np.single, 'f'),
(np.double, 'd'),
(np.longdouble, 'g'),
(np.csingle, 'Zf'),
(np.cdouble, 'Zd'),
(np.clongdouble, 'Zg'),
]
scalars_only, codes_only = zip(*scalars_and_codes)
class TestScalarPEP3118:
@pytest.mark.parametrize('scalar', scalars_only, ids=codes_only)
def test_scalar_match_array(self, scalar):
x = scalar()
a = np.array([], dtype=np.dtype(scalar))
mv_x = memoryview(x)
mv_a = memoryview(a)
assert_equal(mv_x.format, mv_a.format)
@pytest.mark.parametrize('scalar', scalars_only, ids=codes_only)
def test_scalar_dim(self, scalar):
x = scalar()
mv_x = memoryview(x)
assert_equal(mv_x.itemsize, np.dtype(scalar).itemsize)
assert_equal(mv_x.ndim, 0)
assert_equal(mv_x.shape, ())
assert_equal(mv_x.strides, ())
assert_equal(mv_x.suboffsets, ())
@pytest.mark.parametrize('scalar, code', scalars_and_codes, ids=codes_only)
def test_scalar_code_and_properties(self, scalar, code):
x = scalar()
expected = dict(strides=(), itemsize=x.dtype.itemsize, ndim=0,
shape=(), format=code, readonly=True)
mv_x = memoryview(x)
assert self._as_dict(mv_x) == expected
@pytest.mark.parametrize('scalar', scalars_only, ids=codes_only)
def test_scalar_buffers_readonly(self, scalar):
x = scalar()
with pytest.raises(BufferError, match="scalar buffer is readonly"):
get_buffer_info(x, ["WRITABLE"])
def test_void_scalar_structured_data(self):
dt = np.dtype([('name', np.unicode_, 16), ('grades', np.float64, (2,))])
x = np.array(('ndarray_scalar', (1.2, 3.0)), dtype=dt)[()]
assert_(isinstance(x, np.void))
mv_x = memoryview(x)
expected_size = 16 * np.dtype((np.unicode_, 1)).itemsize
expected_size += 2 * np.dtype(np.float64).itemsize
assert_equal(mv_x.itemsize, expected_size)
assert_equal(mv_x.ndim, 0)
assert_equal(mv_x.shape, ())
assert_equal(mv_x.strides, ())
assert_equal(mv_x.suboffsets, ())
# check scalar format string against ndarray format string
a = np.array([('Sarah', (8.0, 7.0)), ('John', (6.0, 7.0))], dtype=dt)
assert_(isinstance(a, np.ndarray))
mv_a = memoryview(a)
assert_equal(mv_x.itemsize, mv_a.itemsize)
assert_equal(mv_x.format, mv_a.format)
# Check that we do not allow writeable buffer export (technically
# we could allow it sometimes here...)
with pytest.raises(BufferError, match="scalar buffer is readonly"):
get_buffer_info(x, ["WRITABLE"])
def _as_dict(self, m):
return dict(strides=m.strides, shape=m.shape, itemsize=m.itemsize,
ndim=m.ndim, format=m.format, readonly=m.readonly)
def test_datetime_memoryview(self):
# gh-11656
# Values verified with v1.13.3, shape is not () as in test_scalar_dim
dt1 = np.datetime64('2016-01-01')
dt2 = np.datetime64('2017-01-01')
expected = dict(strides=(1,), itemsize=1, ndim=1, shape=(8,),
format='B', readonly=True)
v = memoryview(dt1)
assert self._as_dict(v) == expected
v = memoryview(dt2 - dt1)
assert self._as_dict(v) == expected
dt = np.dtype([('a', 'uint16'), ('b', 'M8[s]')])
a = np.empty(1, dt)
# Fails to create a PEP 3118 valid buffer
assert_raises((ValueError, BufferError), memoryview, a[0])
# Check that we do not allow writeable buffer export
with pytest.raises(BufferError, match="scalar buffer is readonly"):
get_buffer_info(dt1, ["WRITABLE"])
@pytest.mark.parametrize('s', [
pytest.param("\x32\x32", id="ascii"),
pytest.param("\uFE0F\uFE0F", id="basic multilingual"),
pytest.param("\U0001f4bb\U0001f4bb", id="non-BMP"),
])
def test_str_ucs4(self, s):
s = np.str_(s) # only our subclass implements the buffer protocol
# all the same, characters always encode as ucs4
expected = dict(strides=(), itemsize=8, ndim=0, shape=(), format='2w',
readonly=True)
v = memoryview(s)
assert self._as_dict(v) == expected
        # integers of the platform-appropriate endianness
code_points = np.frombuffer(v, dtype='i4')
assert_equal(code_points, [ord(c) for c in s])
# Check that we do not allow writeable buffer export
with pytest.raises(BufferError, match="scalar buffer is readonly"):
get_buffer_info(s, ["WRITABLE"])
def test_user_scalar_fails_buffer(self):
r = rational(1)
with assert_raises(TypeError):
memoryview(r)
# Check that we do not allow writeable buffer export
with pytest.raises(BufferError, match="scalar buffer is readonly"):
get_buffer_info(r, ["WRITABLE"])
| {
"content_hash": "e412c2d2e9087947622e75a272bd295d",
"timestamp": "",
"source": "github",
"line_count": 153,
"max_line_length": 80,
"avg_line_length": 36.52287581699346,
"alnum_prop": 0.594488188976378,
"repo_name": "rgommers/numpy",
"id": "0e6ab1015e15eedf077c3f8f9ccbdcb20d4d1d31",
"size": "5588",
"binary": false,
"copies": "11",
"ref": "refs/heads/main",
"path": "numpy/core/tests/test_scalarbuffer.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "5932714"
},
{
"name": "C++",
"bytes": "207284"
},
{
"name": "Cython",
"bytes": "146124"
},
{
"name": "D",
"bytes": "19"
},
{
"name": "Dockerfile",
"bytes": "5263"
},
{
"name": "Forth",
"bytes": "3787"
},
{
"name": "Fortran",
"bytes": "24695"
},
{
"name": "Makefile",
"bytes": "1697"
},
{
"name": "Python",
"bytes": "10467336"
},
{
"name": "Shell",
"bytes": "25583"
},
{
"name": "Smarty",
"bytes": "4104"
},
{
"name": "sed",
"bytes": "5741"
}
],
"symlink_target": ""
} |
''' My initial approach involved writing recursive formulas for elements on both
diagonals, then converting them to iterative formulas which I next simplified
by eliminating all summations. This worked, but it was pretty cumbersome.
Then I did some reading, and it turns out that a simple paradigm shift makes the
task much simpler. Instead of summing elements on the diagonals, we can think of it
as summing the corners of the sub-spirals.
x-n+1 --> * . . . . . * <-- x
. . . . . . .
. . . . . . .
. . . . . . .
. . . . . . .
x-2n+2 --> * . . . . . * <-- x-3n+3
sum = x + (x-n+1) + (x-2n+2) + (x-3n+3)
= 4x - 6n + 6
Now for the spiral with size n, x = n^2, so we can substitute it in this equation:
sum = 4n^2 - 6n + 6
Now we just need to sum values of this formula for n = 1, 3, 5, ..., 1001
We can compute the sum of each term of the equation separately. Writing k for
the number of odd ring sizes (k = 501 for n = 1, 3, 5, ..., 1001):
    sum of n^2 over odd n = (4k^3 - k) / 3
    sum of 6n  over odd n = 6k^2
    sum of 6   over odd n = 6k
Bringing all of it together we get a final equation:
    solution = 4(4k^3 - k) / 3 - 6k^2 + 6k - 3
(the trailing -3 corrects the center ring, where the formula gives 4 instead
of 1, which is also why cornerSum below special-cases n == 1)
'''
def cornerSum(n):
if n == 1: return 1
return 4 * n * n - 6 * n + 6
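# Sanity check added for illustration: the 5x5 example spiral from the problem
# statement has a diagonal sum of 101, and indeed
# cornerSum(1) + cornerSum(3) + cornerSum(5) = 1 + 24 + 76 = 101.
assert cornerSum(1) + cornerSum(3) + cornerSum(5) == 101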
total = 0
for i in range(1, 1002, 2):
total += cornerSum(i)
print(total)
| {
"content_hash": "566d0a05e3313be0da8e191b00671d46",
"timestamp": "",
"source": "github",
"line_count": 47,
"max_line_length": 87,
"avg_line_length": 28.27659574468085,
"alnum_prop": 0.5425131677953349,
"repo_name": "buyuk-dev/project-euler-solutions",
"id": "6a9e369343f71312572296b2b475d5f77ae1ece7",
"size": "1329",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "python/problem028.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C++",
"bytes": "34890"
},
{
"name": "Python",
"bytes": "21737"
}
],
"symlink_target": ""
} |
"""``tornado.gen`` is a generator-based interface to make it easier to
work in an asynchronous environment. Code using the ``gen`` module
is technically asynchronous, but it is written as a single generator
instead of a collection of separate functions.
For example, the following asynchronous handler::
class AsyncHandler(RequestHandler):
@asynchronous
def get(self):
http_client = AsyncHTTPClient()
http_client.fetch("http://example.com",
callback=self.on_fetch)
def on_fetch(self, response):
do_something_with_response(response)
self.render("template.html")
could be written with ``gen`` as::
class GenAsyncHandler(RequestHandler):
@gen.coroutine
def get(self):
http_client = AsyncHTTPClient()
response = yield http_client.fetch("http://example.com")
do_something_with_response(response)
self.render("template.html")
Most asynchronous functions in Tornado return a `.Future`;
yielding this object returns its `~.Future.result`.
You can also yield a list or dict of ``Futures``, which will be
started at the same time and run in parallel; a list or dict of results will
be returned when they are all finished::
@gen.coroutine
def get(self):
http_client = AsyncHTTPClient()
response1, response2 = yield [http_client.fetch(url1),
http_client.fetch(url2)]
response_dict = yield dict(response3=http_client.fetch(url3),
response4=http_client.fetch(url4))
response3 = response_dict['response3']
response4 = response_dict['response4']
.. versionchanged:: 3.2
Dict support added.
"""
from __future__ import absolute_import, division, print_function, with_statement
import collections
import functools
import itertools
import sys
import types
from tornado.concurrent import Future, TracebackFuture, is_future, chain_future
from tornado.ioloop import IOLoop
from tornado import stack_context
class KeyReuseError(Exception):
pass
class UnknownKeyError(Exception):
pass
class LeakedCallbackError(Exception):
pass
class BadYieldError(Exception):
pass
class ReturnValueIgnoredError(Exception):
pass
class TimeoutError(Exception):
"""Exception raised by ``with_timeout``."""
def engine(func):
"""Callback-oriented decorator for asynchronous generators.
This is an older interface; for new code that does not need to be
compatible with versions of Tornado older than 3.0 the
`coroutine` decorator is recommended instead.
This decorator is similar to `coroutine`, except it does not
return a `.Future` and the ``callback`` argument is not treated
specially.
In most cases, functions decorated with `engine` should take
a ``callback`` argument and invoke it with their result when
they are finished. One notable exception is the
`~tornado.web.RequestHandler` :ref:`HTTP verb methods <verbs>`,
which use ``self.finish()`` in place of a callback argument.
"""
func = _make_coroutine_wrapper(func, replace_callback=False)
@functools.wraps(func)
def wrapper(*args, **kwargs):
future = func(*args, **kwargs)
def final_callback(future):
if future.result() is not None:
raise ReturnValueIgnoredError(
"@gen.engine functions cannot return values: %r" %
(future.result(),))
future.add_done_callback(final_callback)
return wrapper
def coroutine(func, replace_callback=True):
"""Decorator for asynchronous generators.
Any generator that yields objects from this module must be wrapped
in either this decorator or `engine`.
Coroutines may "return" by raising the special exception
`Return(value) <Return>`. In Python 3.3+, it is also possible for
the function to simply use the ``return value`` statement (prior to
Python 3.3 generators were not allowed to also return values).
In all versions of Python a coroutine that simply wishes to exit
early may use the ``return`` statement without a value.
Functions with this decorator return a `.Future`. Additionally,
they may be called with a ``callback`` keyword argument, which
will be invoked with the future's result when it resolves. If the
coroutine fails, the callback will not be run and an exception
will be raised into the surrounding `.StackContext`. The
``callback`` argument is not visible inside the decorated
function; it is handled by the decorator itself.
From the caller's perspective, ``@gen.coroutine`` is similar to
the combination of ``@return_future`` and ``@gen.engine``.
"""
return _make_coroutine_wrapper(func, replace_callback=True)
def _make_coroutine_wrapper(func, replace_callback):
"""The inner workings of ``@gen.coroutine`` and ``@gen.engine``.
The two decorators differ in their treatment of the ``callback``
argument, so we cannot simply implement ``@engine`` in terms of
``@coroutine``.
"""
@functools.wraps(func)
def wrapper(*args, **kwargs):
future = TracebackFuture()
if replace_callback and 'callback' in kwargs:
callback = kwargs.pop('callback')
IOLoop.current().add_future(
future, lambda future: callback(future.result()))
try:
result = func(*args, **kwargs)
except (Return, StopIteration) as e:
result = getattr(e, 'value', None)
except Exception:
future.set_exc_info(sys.exc_info())
return future
else:
if isinstance(result, types.GeneratorType):
# Inline the first iteration of Runner.run. This lets us
# avoid the cost of creating a Runner when the coroutine
# never actually yields, which in turn allows us to
# use "optional" coroutines in critical path code without
# performance penalty for the synchronous case.
try:
orig_stack_contexts = stack_context._state.contexts
yielded = next(result)
if stack_context._state.contexts is not orig_stack_contexts:
yielded = TracebackFuture()
yielded.set_exception(
stack_context.StackContextInconsistentError(
'stack_context inconsistency (probably caused '
'by yield within a "with StackContext" block)'))
except (StopIteration, Return) as e:
future.set_result(getattr(e, 'value', None))
except Exception:
future.set_exc_info(sys.exc_info())
else:
Runner(result, future, yielded)
try:
return future
finally:
# Subtle memory optimization: if next() raised an exception,
# the future's exc_info contains a traceback which
# includes this stack frame. This creates a cycle,
# which will be collected at the next full GC but has
# been shown to greatly increase memory usage of
# benchmarks (relative to the refcount-based scheme
# used in the absence of cycles). We can avoid the
# cycle by clearing the local variable after we return it.
future = None
future.set_result(result)
return future
return wrapper
class Return(Exception):
"""Special exception to return a value from a `coroutine`.
If this exception is raised, its value argument is used as the
result of the coroutine::
@gen.coroutine
def fetch_json(url):
response = yield AsyncHTTPClient().fetch(url)
raise gen.Return(json_decode(response.body))
In Python 3.3, this exception is no longer necessary: the ``return``
statement can be used directly to return a value (previously
``yield`` and ``return`` with a value could not be combined in the
same function).
By analogy with the return statement, the value argument is optional,
but it is never necessary to ``raise gen.Return()``. The ``return``
statement can be used with no arguments instead.
"""
def __init__(self, value=None):
super(Return, self).__init__()
self.value = value
class YieldPoint(object):
"""Base class for objects that may be yielded from the generator.
.. deprecated:: 4.0
Use `Futures <.Future>` instead.
"""
def start(self, runner):
"""Called by the runner after the generator has yielded.
No other methods will be called on this object before ``start``.
"""
raise NotImplementedError()
def is_ready(self):
"""Called by the runner to determine whether to resume the generator.
Returns a boolean; may be called more than once.
"""
raise NotImplementedError()
def get_result(self):
"""Returns the value to use as the result of the yield expression.
This method will only be called once, and only after `is_ready`
has returned true.
"""
raise NotImplementedError()
class Callback(YieldPoint):
"""Returns a callable object that will allow a matching `Wait` to proceed.
The key may be any value suitable for use as a dictionary key, and is
used to match ``Callbacks`` to their corresponding ``Waits``. The key
must be unique among outstanding callbacks within a single run of the
generator function, but may be reused across different runs of the same
function (so constants generally work fine).
The callback may be called with zero or one arguments; if an argument
is given it will be returned by `Wait`.
.. deprecated:: 4.0
Use `Futures <.Future>` instead.
"""
def __init__(self, key):
self.key = key
def start(self, runner):
self.runner = runner
runner.register_callback(self.key)
def is_ready(self):
return True
def get_result(self):
return self.runner.result_callback(self.key)
class Wait(YieldPoint):
"""Returns the argument passed to the result of a previous `Callback`.
.. deprecated:: 4.0
Use `Futures <.Future>` instead.
"""
def __init__(self, key):
self.key = key
def start(self, runner):
self.runner = runner
def is_ready(self):
return self.runner.is_ready(self.key)
def get_result(self):
return self.runner.pop_result(self.key)
class WaitAll(YieldPoint):
"""Returns the results of multiple previous `Callbacks <Callback>`.
The argument is a sequence of `Callback` keys, and the result is
a list of results in the same order.
`WaitAll` is equivalent to yielding a list of `Wait` objects.
.. deprecated:: 4.0
Use `Futures <.Future>` instead.
"""
def __init__(self, keys):
self.keys = keys
def start(self, runner):
self.runner = runner
def is_ready(self):
return all(self.runner.is_ready(key) for key in self.keys)
def get_result(self):
return [self.runner.pop_result(key) for key in self.keys]
def Task(func, *args, **kwargs):
"""Adapts a callback-based asynchronous function for use in coroutines.
Takes a function (and optional additional arguments) and runs it with
those arguments plus a ``callback`` keyword argument. The argument passed
to the callback is returned as the result of the yield expression.
.. versionchanged:: 4.0
``gen.Task`` is now a function that returns a `.Future`, instead of
a subclass of `YieldPoint`. It still behaves the same way when
yielded.
"""
future = Future()
def handle_exception(typ, value, tb):
if future.done():
return False
future.set_exc_info((typ, value, tb))
return True
def set_result(result):
if future.done():
return
future.set_result(result)
with stack_context.ExceptionStackContext(handle_exception):
func(*args, callback=_argument_adapter(set_result), **kwargs)
return future
class YieldFuture(YieldPoint):
def __init__(self, future, io_loop=None):
self.future = future
self.io_loop = io_loop or IOLoop.current()
def start(self, runner):
if not self.future.done():
self.runner = runner
self.key = object()
runner.register_callback(self.key)
self.io_loop.add_future(self.future, runner.result_callback(self.key))
else:
self.runner = None
self.result = self.future.result()
def is_ready(self):
if self.runner is not None:
return self.runner.is_ready(self.key)
else:
return True
def get_result(self):
if self.runner is not None:
return self.runner.pop_result(self.key).result()
else:
return self.result
class Multi(YieldPoint):
"""Runs multiple asynchronous operations in parallel.
Takes a list of ``YieldPoints`` or ``Futures`` and returns a list of
their responses. It is not necessary to call `Multi` explicitly,
since the engine will do so automatically when the generator yields
a list of ``YieldPoints`` or a mixture of ``YieldPoints`` and ``Futures``.
Instead of a list, the argument may also be a dictionary whose values are
Futures, in which case a parallel dictionary is returned mapping the same
keys to their results.
"""
def __init__(self, children):
self.keys = None
if isinstance(children, dict):
self.keys = list(children.keys())
children = children.values()
self.children = []
for i in children:
if is_future(i):
i = YieldFuture(i)
self.children.append(i)
assert all(isinstance(i, YieldPoint) for i in self.children)
self.unfinished_children = set(self.children)
def start(self, runner):
for i in self.children:
i.start(runner)
def is_ready(self):
finished = list(itertools.takewhile(
lambda i: i.is_ready(), self.unfinished_children))
self.unfinished_children.difference_update(finished)
return not self.unfinished_children
def get_result(self):
result = (i.get_result() for i in self.children)
if self.keys is not None:
return dict(zip(self.keys, result))
else:
return list(result)
def multi_future(children):
"""Wait for multiple asynchronous futures in parallel.
Takes a list of ``Futures`` (but *not* other ``YieldPoints``) and returns
a new Future that resolves when all the other Futures are done.
If all the ``Futures`` succeeded, the returned Future's result is a list
of their results. If any failed, the returned Future raises the exception
of the first one to fail.
Instead of a list, the argument may also be a dictionary whose values are
Futures, in which case a parallel dictionary is returned mapping the same
keys to their results.
    It is not necessary to call `multi_future` explicitly, since the engine will
do so automatically when the generator yields a list of `Futures`.
This function is faster than the `Multi` `YieldPoint` because it does not
require the creation of a stack context.
.. versionadded:: 4.0
"""
if isinstance(children, dict):
keys = list(children.keys())
children = children.values()
else:
keys = None
assert all(is_future(i) for i in children)
unfinished_children = set(children)
future = Future()
if not children:
future.set_result({} if keys is not None else [])
def callback(f):
unfinished_children.remove(f)
if not unfinished_children:
try:
result_list = [i.result() for i in children]
except Exception:
future.set_exc_info(sys.exc_info())
else:
if keys is not None:
future.set_result(dict(zip(keys, result_list)))
else:
future.set_result(result_list)
for f in children:
f.add_done_callback(callback)
return future
def maybe_future(x):
"""Converts ``x`` into a `.Future`.
If ``x`` is already a `.Future`, it is simply returned; otherwise
it is wrapped in a new `.Future`. This is suitable for use as
``result = yield gen.maybe_future(f())`` when you don't know whether
``f()`` returns a `.Future` or not.
"""
if is_future(x):
return x
else:
fut = Future()
fut.set_result(x)
return fut
def with_timeout(timeout, future, io_loop=None):
"""Wraps a `.Future` in a timeout.
Raises `TimeoutError` if the input future does not complete before
``timeout``, which may be specified in any form allowed by
`.IOLoop.add_timeout` (i.e. a `datetime.timedelta` or an absolute time
relative to `.IOLoop.time`)
Currently only supports Futures, not other `YieldPoint` classes.
.. versionadded:: 4.0
"""
# TODO: allow yield points in addition to futures?
# Tricky to do with stack_context semantics.
#
# It's tempting to optimize this by cancelling the input future on timeout
# instead of creating a new one, but A) we can't know if we are the only
# one waiting on the input future, so cancelling it might disrupt other
# callers and B) concurrent futures can only be cancelled while they are
# in the queue, so cancellation cannot reliably bound our waiting time.
result = Future()
chain_future(future, result)
if io_loop is None:
io_loop = IOLoop.current()
timeout_handle = io_loop.add_timeout(
timeout,
lambda: result.set_exception(TimeoutError("Timeout")))
if isinstance(future, Future):
# We know this future will resolve on the IOLoop, so we don't
# need the extra thread-safety of IOLoop.add_future (and we also
        # don't care about StackContext here).
future.add_done_callback(
lambda future: io_loop.remove_timeout(timeout_handle))
else:
# concurrent.futures.Futures may resolve on any thread, so we
# need to route them back to the IOLoop.
io_loop.add_future(
future, lambda future: io_loop.remove_timeout(timeout_handle))
return result
_null_future = Future()
_null_future.set_result(None)
moment = Future()
moment.__doc__ = \
"""A special object which may be yielded to allow the IOLoop to run for
one iteration.
This is not needed in normal use but it can be helpful in long-running
coroutines that are likely to yield Futures that are ready instantly.
Usage: ``yield gen.moment``
.. versionadded:: 4.0
"""
moment.set_result(None)
class Runner(object):
"""Internal implementation of `tornado.gen.engine`.
Maintains information about pending callbacks and their results.
The results of the generator are stored in ``result_future`` (a
`.TracebackFuture`)
"""
def __init__(self, gen, result_future, first_yielded):
self.gen = gen
self.result_future = result_future
self.future = _null_future
self.yield_point = None
self.pending_callbacks = None
self.results = None
self.running = False
self.finished = False
self.had_exception = False
self.io_loop = IOLoop.current()
# For efficiency, we do not create a stack context until we
# reach a YieldPoint (stack contexts are required for the historical
# semantics of YieldPoints, but not for Futures). When we have
# done so, this field will be set and must be called at the end
# of the coroutine.
self.stack_context_deactivate = None
if self.handle_yield(first_yielded):
self.run()
def register_callback(self, key):
"""Adds ``key`` to the list of callbacks."""
if self.pending_callbacks is None:
# Lazily initialize the old-style YieldPoint data structures.
self.pending_callbacks = set()
self.results = {}
if key in self.pending_callbacks:
raise KeyReuseError("key %r is already pending" % (key,))
self.pending_callbacks.add(key)
def is_ready(self, key):
"""Returns true if a result is available for ``key``."""
if self.pending_callbacks is None or key not in self.pending_callbacks:
raise UnknownKeyError("key %r is not pending" % (key,))
return key in self.results
def set_result(self, key, result):
"""Sets the result for ``key`` and attempts to resume the generator."""
self.results[key] = result
if self.yield_point is not None and self.yield_point.is_ready():
try:
self.future.set_result(self.yield_point.get_result())
except:
self.future.set_exc_info(sys.exc_info())
self.yield_point = None
self.run()
def pop_result(self, key):
"""Returns the result for ``key`` and unregisters it."""
self.pending_callbacks.remove(key)
return self.results.pop(key)
def run(self):
"""Starts or resumes the generator, running until it reaches a
yield point that is not ready.
"""
if self.running or self.finished:
return
try:
self.running = True
while True:
future = self.future
if not future.done():
return
self.future = None
try:
orig_stack_contexts = stack_context._state.contexts
try:
value = future.result()
except Exception:
self.had_exception = True
yielded = self.gen.throw(*sys.exc_info())
else:
yielded = self.gen.send(value)
if stack_context._state.contexts is not orig_stack_contexts:
self.gen.throw(
stack_context.StackContextInconsistentError(
'stack_context inconsistency (probably caused '
'by yield within a "with StackContext" block)'))
except (StopIteration, Return) as e:
self.finished = True
self.future = _null_future
if self.pending_callbacks and not self.had_exception:
# If we ran cleanly without waiting on all callbacks
# raise an error (really more of a warning). If we
# had an exception then some callbacks may have been
# orphaned, so skip the check in that case.
raise LeakedCallbackError(
"finished without waiting for callbacks %r" %
self.pending_callbacks)
self.result_future.set_result(getattr(e, 'value', None))
self.result_future = None
self._deactivate_stack_context()
return
except Exception:
self.finished = True
self.future = _null_future
self.result_future.set_exc_info(sys.exc_info())
self.result_future = None
self._deactivate_stack_context()
return
if not self.handle_yield(yielded):
return
finally:
self.running = False
def handle_yield(self, yielded):
if isinstance(yielded, list):
if all(is_future(f) for f in yielded):
yielded = multi_future(yielded)
else:
yielded = Multi(yielded)
elif isinstance(yielded, dict):
if all(is_future(f) for f in yielded.values()):
yielded = multi_future(yielded)
else:
yielded = Multi(yielded)
if isinstance(yielded, YieldPoint):
self.future = TracebackFuture()
def start_yield_point():
try:
yielded.start(self)
if yielded.is_ready():
self.future.set_result(
yielded.get_result())
else:
self.yield_point = yielded
except Exception:
self.future = TracebackFuture()
self.future.set_exc_info(sys.exc_info())
if self.stack_context_deactivate is None:
# Start a stack context if this is the first
# YieldPoint we've seen.
with stack_context.ExceptionStackContext(
self.handle_exception) as deactivate:
self.stack_context_deactivate = deactivate
def cb():
start_yield_point()
self.run()
self.io_loop.add_callback(cb)
return False
else:
start_yield_point()
elif is_future(yielded):
self.future = yielded
if not self.future.done() or self.future is moment:
self.io_loop.add_future(
self.future, lambda f: self.run())
return False
else:
self.future = TracebackFuture()
self.future.set_exception(BadYieldError(
"yielded unknown object %r" % (yielded,)))
return True
def result_callback(self, key):
return stack_context.wrap(_argument_adapter(
functools.partial(self.set_result, key)))
def handle_exception(self, typ, value, tb):
if not self.running and not self.finished:
self.future = TracebackFuture()
self.future.set_exc_info((typ, value, tb))
self.run()
return True
else:
return False
def _deactivate_stack_context(self):
if self.stack_context_deactivate is not None:
self.stack_context_deactivate()
self.stack_context_deactivate = None
Arguments = collections.namedtuple('Arguments', ['args', 'kwargs'])
def _argument_adapter(callback):
"""Returns a function that when invoked runs ``callback`` with one arg.
If the function returned by this function is called with exactly
one argument, that argument is passed to ``callback``. Otherwise
the args tuple and kwargs dict are wrapped in an `Arguments` object.
"""
def wrapper(*args, **kwargs):
if kwargs or len(args) > 1:
callback(Arguments(args, kwargs))
elif args:
callback(args[0])
else:
callback(None)
return wrapper
| {
"content_hash": "82980478345342eb0e838086f80c3053",
"timestamp": "",
"source": "github",
"line_count": 751,
"max_line_length": 82,
"avg_line_length": 36.529960053262315,
"alnum_prop": 0.6053801851716848,
"repo_name": "robotlinker/robotlinker_core",
"id": "4bb82d422c7b48ab7c654e3556861ab6414398cb",
"size": "27434",
"binary": false,
"copies": "18",
"ref": "refs/heads/master",
"path": "src/rosbridge_suite/rosbridge_server/src/tornado/gen.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "11643"
},
{
"name": "C++",
"bytes": "427040"
},
{
"name": "CMake",
"bytes": "71031"
},
{
"name": "CSS",
"bytes": "6420"
},
{
"name": "HTML",
"bytes": "266390"
},
{
"name": "JavaScript",
"bytes": "53686"
},
{
"name": "Python",
"bytes": "1585372"
},
{
"name": "Shell",
"bytes": "311"
}
],
"symlink_target": ""
} |
"""Deployment serializers for orchestrator"""
from copy import deepcopy
from itertools import groupby
import sqlalchemy as sa
from sqlalchemy.orm import joinedload
import math
import six
from nailgun import consts
from nailgun.db import db
from nailgun.db.sqlalchemy.models import Node
from nailgun.extensions.volume_manager import manager as volume_manager
from nailgun import objects
from nailgun import utils
from nailgun.orchestrator.base_serializers import GraphBasedSerializer
from nailgun.orchestrator.base_serializers import MuranoMetadataSerializerMixin
from nailgun.orchestrator.base_serializers import \
VmwareDeploymentSerializerMixin
from nailgun.orchestrator.neutron_serializers import \
NeutronNetworkDeploymentSerializer
from nailgun.orchestrator.neutron_serializers import \
NeutronNetworkDeploymentSerializer51
from nailgun.orchestrator.neutron_serializers import \
NeutronNetworkDeploymentSerializer60
from nailgun.orchestrator.neutron_serializers import \
NeutronNetworkDeploymentSerializer61
from nailgun.orchestrator.neutron_serializers import \
NeutronNetworkDeploymentSerializer70
from nailgun.orchestrator.nova_serializers import \
NovaNetworkDeploymentSerializer
from nailgun.orchestrator.nova_serializers import \
NovaNetworkDeploymentSerializer61
class DeploymentMultinodeSerializer(GraphBasedSerializer):
nova_network_serializer = NovaNetworkDeploymentSerializer
neutron_network_serializer = NeutronNetworkDeploymentSerializer
critical_roles = ['controller', 'ceph-osd', 'primary-mongo']
def serialize(self, cluster, nodes, ignore_customized=False):
"""Method generates facts which
through an orchestrator passes to puppet
"""
def keyfunc(node):
return bool(node.replaced_deployment_info)
serialized_nodes = []
for customized, node_group in groupby(nodes, keyfunc):
if customized and not ignore_customized:
serialized_nodes.extend(
self.serialize_customized(cluster, node_group))
else:
serialized_nodes.extend(self.serialize_generated(
cluster, node_group))
        # NOTE(dshulyak) tasks should not be preserved from replaced deployment
        # info; there is a different mechanism to control changes in tasks
        # introduced during granular deployment, and that mechanism should be used
self.set_tasks(serialized_nodes)
return serialized_nodes
def serialize_generated(self, cluster, nodes):
nodes = self.serialize_nodes(nodes)
common_attrs = self.get_common_attrs(cluster)
self.set_deployment_priorities(nodes)
self.set_critical_nodes(nodes)
return [utils.dict_merge(node, common_attrs) for node in nodes]
def serialize_customized(self, cluster, nodes):
serialized = []
for node in nodes:
for role_data in node.replaced_deployment_info:
serialized.append(role_data)
return serialized
def get_common_attrs(self, cluster):
"""Cluster attributes."""
attrs = objects.Attributes.merged_attrs_values(cluster.attributes)
release = self.current_release(cluster)
attrs['deployment_mode'] = cluster.mode
attrs['deployment_id'] = cluster.id
attrs['openstack_version_prev'] = getattr(
self.previous_release(cluster), 'version', None)
attrs['openstack_version'] = release.version
attrs['fuel_version'] = cluster.fuel_version
attrs['nodes'] = self.node_list(
objects.Cluster.get_nodes_not_for_deletion(cluster))
# Adding params to workloads_collector
if 'workloads_collector' not in attrs:
attrs['workloads_collector'] = {}
attrs['workloads_collector']['create_user'] = \
objects.MasterNodeSettings.must_send_stats()
username = attrs['workloads_collector'].pop('user', None)
attrs['workloads_collector']['username'] = username
for node in attrs['nodes']:
if node['role'] in 'cinder':
attrs['use_cinder'] = True
self.set_storage_parameters(cluster, attrs)
net_serializer = self.get_net_provider_serializer(cluster)
net_common_attrs = net_serializer.get_common_attrs(cluster, attrs)
attrs = utils.dict_merge(attrs, net_common_attrs)
return attrs
def current_release(self, cluster):
"""Actual cluster release."""
return objects.Release.get_by_uid(cluster.pending_release_id) \
if cluster.status == consts.CLUSTER_STATUSES.update \
else cluster.release
def previous_release(self, cluster):
"""Returns previous release.
:param cluster: a ``Cluster`` instance to retrieve release from
:returns: a ``Release`` instance of previous release or ``None``
in case there's no previous release (fresh deployment).
"""
if cluster.status == consts.CLUSTER_STATUSES.update:
return cluster.release
return None
def set_storage_parameters(self, cluster, attrs):
"""Generate pg_num as the number of OSDs across the cluster
multiplied by 100, divided by Ceph replication factor, and
rounded up to the nearest power of 2.
"""
osd_num = 0
nodes = db().query(Node).filter(
Node.cluster == cluster
).filter(sa.or_(
Node.roles.any('ceph-osd'),
Node.pending_roles.any('ceph-osd')
)).options(joinedload('attributes'))
for node in nodes:
from nailgun.extensions.volume_manager.extension \
import VolumeManagerExtension
for disk in VolumeManagerExtension.get_volumes(node):
for part in disk.get('volumes', []):
if part.get('name') == 'ceph' and part.get('size', 0) > 0:
osd_num += 1
if osd_num > 0:
repl = int(attrs['storage']['osd_pool_size'])
pg_num = 2 ** int(math.ceil(math.log(osd_num * 100.0 / repl, 2)))
else:
pg_num = 128
attrs['storage']['pg_num'] = pg_num
@classmethod
def node_list(cls, nodes):
"""Generate nodes list. Represents
as "nodes" parameter in facts.
"""
node_list = []
for node in nodes:
for role in objects.Node.all_roles(node):
node_list.append(cls.serialize_node_for_node_list(node, role))
return node_list
@classmethod
def serialize_node_for_node_list(cls, node, role):
return {
'uid': node.uid,
'fqdn': node.fqdn,
'name': objects.Node.make_slave_name(node),
'role': role}
# TODO(apopovych): we have more generical method 'filter_by_roles'
def by_role(self, nodes, role):
return filter(lambda node: node['role'] == role, nodes)
def not_roles(self, nodes, roles):
return filter(lambda node: node['role'] not in roles, nodes)
def set_critical_nodes(self, nodes):
"""Set behavior on nodes deployment error
during deployment process.
"""
for n in nodes:
n['fail_if_error'] = n['role'] in self.critical_roles
def serialize_nodes(self, nodes):
"""Serialize node for each role.
For example if node has two roles then
in orchestrator will be passed two serialized
nodes.
"""
serialized_nodes = []
for node in nodes:
for role in objects.Node.all_roles(node):
serialized_nodes.append(self.serialize_node(node, role))
return serialized_nodes
def serialize_node(self, node, role):
"""Serialize node, then it will be
merged with common attributes
"""
node_attrs = {
            # Yes, uid really should be a string
'uid': node.uid,
'fqdn': node.fqdn,
'status': node.status,
'role': role,
'vms_conf': node.attributes.vms_conf,
            # TODO (eli): need to remove, required
# for the fake thread only
'online': node.online
}
net_serializer = self.get_net_provider_serializer(node.cluster)
node_attrs.update(net_serializer.get_node_attrs(node))
node_attrs.update(net_serializer.network_ranges(node.group_id))
node_attrs.update(self.get_image_cache_max_size(node))
node_attrs.update(self.generate_test_vm_image_data(node))
return node_attrs
def get_image_cache_max_size(self, node):
images_ceph = (node.cluster.attributes['editable']['storage']
['images_ceph']['value'])
if images_ceph:
image_cache_max_size = '0'
else:
from nailgun.extensions.volume_manager.extension \
import VolumeManagerExtension
image_cache_max_size = volume_manager.calc_glance_cache_size(
VolumeManagerExtension.get_volumes(node))
return {'glance': {'image_cache_max_size': image_cache_max_size}}
def generate_test_vm_image_data(self, node):
# Instantiate all default values in dict.
image_data = {
'container_format': 'bare',
'public': 'true',
'disk_format': 'qcow2',
'img_name': 'TestVM',
'img_path': '',
'os_name': 'cirros',
'min_ram': 64,
'glance_properties': '',
}
        # Generate the right path to the image.
c_attrs = node.cluster.attributes
if 'ubuntu' in c_attrs['generated']['cobbler']['profile']:
img_dir = '/usr/share/cirros-testvm/'
else:
img_dir = '/opt/vm/'
image_data['img_path'] = '{0}cirros-x86_64-disk.img'.format(img_dir)
glance_properties = []
# Alternate VMWare specific values.
if c_attrs['editable']['common']['libvirt_type']['value'] == 'vcenter':
image_data.update({
'disk_format': 'vmdk',
'img_path': '{0}cirros-i386-disk.vmdk'.format(img_dir),
})
glance_properties.append('--property vmware_disktype=sparse')
glance_properties.append('--property vmware_adaptertype=lsilogic')
glance_properties.append('--property hypervisor_type=vmware')
image_data['glance_properties'] = ' '.join(glance_properties)
return {'test_vm_image': image_data}
@classmethod
def get_net_provider_serializer(cls, cluster):
if cluster.net_provider == 'nova_network':
return cls.nova_network_serializer
else:
return cls.neutron_network_serializer
def filter_by_roles(self, nodes, roles):
return filter(
lambda node: node['role'] in roles, nodes)
class DeploymentHASerializer(DeploymentMultinodeSerializer):
"""Serializer for HA mode."""
critical_roles = ['primary-controller',
'primary-mongo',
'primary-swift-proxy',
'ceph-osd',
'controller']
def get_last_controller(self, nodes):
sorted_nodes = sorted(
nodes, key=lambda node: int(node['uid']))
controller_nodes = self.filter_by_roles(
sorted_nodes, ['controller', 'primary-controller'])
last_controller = None
if len(controller_nodes) > 0:
last_controller = controller_nodes[-1]['name']
return {'last_controller': last_controller}
@classmethod
def node_list(cls, nodes):
"""Node list
"""
node_list = super(
DeploymentHASerializer,
cls
).node_list(nodes)
for node in node_list:
node['swift_zone'] = node['uid']
return node_list
def get_common_attrs(self, cluster):
"""Common attributes for all facts
"""
common_attrs = super(
DeploymentHASerializer,
self
).get_common_attrs(cluster)
net_manager = objects.Cluster.get_network_manager(cluster)
common_attrs.update(net_manager.assign_vips_for_net_groups(cluster))
common_attrs['mp'] = [
{'point': '1', 'weight': '1'},
{'point': '2', 'weight': '2'}]
last_controller = self.get_last_controller(common_attrs['nodes'])
common_attrs.update(last_controller)
return common_attrs
class DeploymentMultinodeSerializer50(MuranoMetadataSerializerMixin,
DeploymentMultinodeSerializer):
pass
class DeploymentHASerializer50(MuranoMetadataSerializerMixin,
DeploymentHASerializer):
pass
class DeploymentMultinodeSerializer51(DeploymentMultinodeSerializer50):
nova_network_serializer = NovaNetworkDeploymentSerializer
neutron_network_serializer = NeutronNetworkDeploymentSerializer51
class DeploymentHASerializer51(DeploymentHASerializer50):
nova_network_serializer = NovaNetworkDeploymentSerializer
neutron_network_serializer = NeutronNetworkDeploymentSerializer51
class DeploymentMultinodeSerializer60(DeploymentMultinodeSerializer50):
nova_network_serializer = NovaNetworkDeploymentSerializer
neutron_network_serializer = NeutronNetworkDeploymentSerializer60
class DeploymentHASerializer60(DeploymentHASerializer50):
nova_network_serializer = NovaNetworkDeploymentSerializer
neutron_network_serializer = NeutronNetworkDeploymentSerializer60
class DeploymentMultinodeSerializer61(DeploymentMultinodeSerializer,
VmwareDeploymentSerializerMixin):
nova_network_serializer = NovaNetworkDeploymentSerializer61
neutron_network_serializer = NeutronNetworkDeploymentSerializer61
def serialize_node(self, node, role):
serialized_node = super(
DeploymentMultinodeSerializer61, self).serialize_node(node, role)
serialized_node['user_node_name'] = node.name
serialized_node.update(self.generate_vmware_data(node))
return serialized_node
@classmethod
def serialize_node_for_node_list(cls, node, role):
serialized_node = super(
DeploymentMultinodeSerializer61,
cls).serialize_node_for_node_list(node, role)
serialized_node['user_node_name'] = node.name
return serialized_node
class DeploymentHASerializer61(DeploymentHASerializer,
VmwareDeploymentSerializerMixin):
nova_network_serializer = NovaNetworkDeploymentSerializer61
neutron_network_serializer = NeutronNetworkDeploymentSerializer61
def serialize_node(self, node, role):
serialized_node = super(
DeploymentHASerializer61, self).serialize_node(node, role)
serialized_node['user_node_name'] = node.name
serialized_node.update(self.generate_vmware_data(node))
return serialized_node
@classmethod
def serialize_node_for_node_list(cls, node, role):
serialized_node = super(
DeploymentHASerializer61,
cls).serialize_node_for_node_list(node, role)
serialized_node['user_node_name'] = node.name
return serialized_node
# Alternate VMWare specific values.
    # FIXME(who): srogov
    # This is a temporary workaround to keep existing functionality.
    # Once the multi-hypervisor support and the Astute part for multiple
    # image support are fully implemented, dict image_data['test_vm_image']
    # needs to be changed to a list of dicts.
def generate_test_vm_image_data(self, node):
attrs = node.cluster.attributes
image_data = super(
DeploymentHASerializer61,
self).generate_test_vm_image_data(node)
images_data = {}
images_data['test_vm_image'] = []
if attrs.get('editable', {}).get('common', {}). \
get('use_vcenter', {}).get('value') is True:
image_vmdk_data = deepcopy(image_data['test_vm_image'])
img_path = image_vmdk_data['img_path']. \
replace('x86_64-disk.img', 'i386-disk.vmdk')
image_vmdk_data.update({
'img_name': 'TestVM-VMDK',
'disk_format': 'vmdk',
'img_path': img_path,
})
image_vmdk_data['glance_properties'] = ' '.join([
'--property vmware_disktype=sparse',
'--property vmware_adaptertype=lsilogic',
'--property hypervisor_type=vmware'])
images_data['test_vm_image'].append(image_vmdk_data)
images_data['test_vm_image'].append(image_data['test_vm_image'])
else:
images_data = image_data
return images_data
class DeploymentHASerializer70(DeploymentHASerializer):
# nova_network_serializer is just for compatibility with current BVTs
# and other tests. It can be removed when tests are fixed.
nova_network_serializer = NovaNetworkDeploymentSerializer61
neutron_network_serializer = NeutronNetworkDeploymentSerializer70
def get_serializer_for_cluster(cluster):
"""Returns a serializer depends on a given `cluster`.
:param cluster: cluster to process
:returns: a serializer for a given cluster
"""
serializers_map = {
'5.0': {
'multinode': DeploymentMultinodeSerializer50,
'ha': DeploymentHASerializer50,
},
'5.1': {
'multinode': DeploymentMultinodeSerializer51,
'ha': DeploymentHASerializer51,
},
'6.0': {
'multinode': DeploymentMultinodeSerializer60,
'ha': DeploymentHASerializer60,
},
'6.1': {
'multinode': DeploymentMultinodeSerializer61,
'ha': DeploymentHASerializer61,
},
'7.0': {
# Multinode is not supported anymore
'ha': DeploymentHASerializer70,
}
}
env_mode = 'ha' if cluster.is_ha_mode else 'multinode'
for version, serializers in six.iteritems(serializers_map):
if cluster.release.environment_version.startswith(version):
return serializers[env_mode]
# return latest serializer by default
latest_version = sorted(six.iterkeys(serializers_map))[-1]
return serializers_map[latest_version][env_mode]
def serialize(orchestrator_graph, cluster, nodes, ignore_customized=False):
"""Serialization depends on deployment mode
"""
objects.Cluster.set_primary_roles(cluster, nodes)
env_version = cluster.release.environment_version
# Only assign IPs for private (GRE) network in 6.1+
if any([env_version.startswith(v) for v in ['5.0', '5.1', '6.0']]):
objects.NodeCollection.prepare_for_lt_6_1_deployment(cluster.nodes)
else:
nst = cluster.network_config.get('segmentation_type')
objects.NodeCollection.prepare_for_deployment(cluster.nodes, nst)
serializer = get_serializer_for_cluster(cluster)(orchestrator_graph)
return serializer.serialize(
cluster, nodes, ignore_customized=ignore_customized)
| {
"content_hash": "47cc76151d671ef459983cc91cd7429d",
"timestamp": "",
"source": "github",
"line_count": 527,
"max_line_length": 79,
"avg_line_length": 36.46869070208729,
"alnum_prop": 0.6342681721213382,
"repo_name": "prmtl/fuel-web",
"id": "616efefd25279d4742bd5bdd1290c0eae496b983",
"size": "19854",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "nailgun/nailgun/orchestrator/deployment_serializers.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "67993"
},
{
"name": "HTML",
"bytes": "7268"
},
{
"name": "JavaScript",
"bytes": "774488"
},
{
"name": "Mako",
"bytes": "1449"
},
{
"name": "Puppet",
"bytes": "282"
},
{
"name": "Python",
"bytes": "4031810"
},
{
"name": "Ruby",
"bytes": "36362"
},
{
"name": "Shell",
"bytes": "30651"
}
],
"symlink_target": ""
} |
from collections import defaultdict
from django.apps import apps
from django.utils.functional import cached_property
def model_path(obj):
return '%s.%s' % (obj._meta.app_label, obj._meta.object_name.lower())
class IdentifierManager(object):
@cached_property
def sites(self):
from django.contrib.sites.models import Site
try:
_sites = dict([(s.id, s) for s in Site.objects.all()])
except: # NOQA
_sites = {1: Site(domain='default.example', name='default')}
return _sites
@cached_property
def ctypes(self):
from django.contrib.contenttypes.models import ContentType
return dict([("%s.%s" % (c.app_label, c.model), c) for c in ContentType.objects.all()])
def resolve_identifier(self, identifier):
"""
The opposite of ``get_identifier()``
"""
app_module, site_id, object_id = identifier.split(':')
app_label, model = app_module.split('.')
site = self.sites[int(site_id)]
ModelClass = apps.get_model(app_label, model)
model = ModelClass.objects.get(pk=object_id)
return model, site
def identifier_to_dict(self, identifier, score=None, related=False):
"""
The opposite of ``get_identifier()``
"""
if type(identifier) is not str:
identifier = identifier.decode('utf-8')
app_module, site_id, object_id = identifier.split(':')
ctype = self.ctypes[app_module]
if related:
spec = {
'related_object_ctype': ctype.id,
'related_object_id': int(object_id),
'related_object_site': int(site_id)
}
else:
spec = {
'object_ctype': ctype.id,
'object_id': int(object_id),
'object_site': int(site_id)
}
if score is not None:
spec['score'] = score
return spec
def get_identifier(self, obj, site_id):
"""
Given a Django Model, returns a string identifier in the format
<app_label>.<model>:<site_id>:<object_id>.
"""
return "%s:%s:%s" % (model_path(obj), site_id, obj.id)
def convert_vote_list_to_userprefs(vote_list):
"""
    Return a user-centered preference matrix.
    ``vote_list`` must be composed of (user_id, object_identifier, rating) tuples.
    ``object_identifier`` is any string that uniquely identifies the object, i.e.:
    <app_label>.<model>:<object_id>.
    The ``utils.get_identifier`` method is provided as a convenience for creating such identifiers.
"""
prefs = defaultdict(dict)
for pref in vote_list:
prefs[pref[0]][pref[1]] = pref[2]
return prefs
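# Example with hypothetical data: the vote list
#   [(1, 'myapp.book:1:10', 5.0), (1, 'myapp.book:1:11', 3.0)]
# becomes {1: {'myapp.book:1:10': 5.0, 'myapp.book:1:11': 3.0}}, while
# convert_vote_list_to_itemprefs() below produces the item-keyed transpose.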
def convert_vote_list_to_itemprefs(vote_list):
"""
    Return an item-centered preference matrix.
    ``vote_list`` must be composed of (user_id, object_identifier, rating) tuples.
    ``object_identifier`` is any string that uniquely identifies the object, i.e.:
    <app_label>.<model>:<object_id>.
    The ``utils.get_identifier`` method is provided as a convenience for creating such identifiers.
"""
prefs = defaultdict(dict)
for pref in vote_list:
prefs[pref[1]][pref[0]] = pref[2]
return prefs
def similary_results_to_itemMatch(qs, provider):
itemMatch = defaultdict(list)
for i in qs:
site = i.related_object_site
item = provider.get_identifier(i.get_object(), site)
similarity = i.score
item2 = provider.get_identifier(i.get_related_object(), site)
itemMatch[item].append((similarity, item2))
return itemMatch
| {
"content_hash": "36e267429c5a558c4a8a87303949617f",
"timestamp": "",
"source": "github",
"line_count": 116,
"max_line_length": 97,
"avg_line_length": 31.31896551724138,
"alnum_prop": 0.6014313239746766,
"repo_name": "fcurella/django-recommends",
"id": "baa03e047709605d4c7a6bfc963d650956685904",
"size": "3633",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "recommends/converters.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "1583"
},
{
"name": "Python",
"bytes": "99259"
}
],
"symlink_target": ""
} |
from setuptools import setup, find_packages
import sys
name = 'scheduling'
description = 'Scheduling web application'
long_description = 'Allows for the generation and editing of a monthly schedule'
version = '0.0.0'
maintainer = ''
maintainer_email = ''
url = ''
packages = find_packages()
include_package_data = True
package_data = {'': ['static/*', 'views/*']}
install_requires = ['Flask', 'Flask-SQLAlchemy', 'Flask-Admin']
data_files = None
if getattr(sys, 'real_prefix', None) is None:
data_files = [('/etc/scheduling', ['scheduling.wsgi'])]
if __name__ == '__main__':
setup(name=name,
description=description,
long_description=long_description,
version=version,
maintainer=maintainer,
maintainer_email=maintainer_email,
url=url,
packages=packages,
include_package_data=include_package_data,
package_data=package_data,
install_requires=install_requires,
data_files=data_files)
| {
"content_hash": "ab7d7552bde484be90698d60f87b8643",
"timestamp": "",
"source": "github",
"line_count": 31,
"max_line_length": 80,
"avg_line_length": 32.354838709677416,
"alnum_prop": 0.6530408773678963,
"repo_name": "schana/scheduling-python",
"id": "9b49ff3ab651ce1566a807bcdb3cd57f5267ab9a",
"size": "1003",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "8523"
}
],
"symlink_target": ""
} |
import os.path
DELETE_MARKER_MESSAGE = '''\
This file is placed here by pip to indicate the source was put
here by pip.
Once this package is successfully installed this source code will be
deleted (unless you remove this file).
'''
PIP_DELETE_MARKER_FILENAME = 'pip-delete-this-directory.txt'
def has_delete_marker_file(directory):
# type: (str) -> bool
return os.path.exists(os.path.join(directory, PIP_DELETE_MARKER_FILENAME))
def write_delete_marker_file(directory):
# type: (str) -> None
"""
Write the pip delete marker file into this directory.
"""
filepath = os.path.join(directory, PIP_DELETE_MARKER_FILENAME)
with open(filepath, 'w') as marker_fp:
marker_fp.write(DELETE_MARKER_MESSAGE)
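# Example round trip (sketch): after write_delete_marker_file(some_dir),
# has_delete_marker_file(some_dir) returns True for that same directory.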
| {
"content_hash": "2b7ba7f6b85470207a4a063f856b9cd8",
"timestamp": "",
"source": "github",
"line_count": 25,
"max_line_length": 78,
"avg_line_length": 29.64,
"alnum_prop": 0.708502024291498,
"repo_name": "eammx/proyectosWeb",
"id": "42ea81405085a0000c587ad563fee30c7f37a026",
"size": "741",
"binary": false,
"copies": "6",
"ref": "refs/heads/master",
"path": "proyectoPython/env/lib/python3.6/site-packages/pip/_internal/utils/marker_files.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "18790"
},
{
"name": "PHP",
"bytes": "60704"
}
],
"symlink_target": ""
} |
from setuptools import setup, find_packages
from codecs import open
from os import path
import sys
if sys.version_info.major < 3:
msg = "Sorry, Python 2 is not supported (yet)"
    sys.stderr.write(msg + "\n")  # works under both Python 2 and Python 3
sys.exit(1)
here = path.abspath(path.dirname(__file__))
with open(path.join(here, 'README.rst'), encoding='utf-8') as f:
long_description = f.read()
setup (
name = 'github-export',
version = '0.1.1',
author = 'taeguk',
author_email = '[email protected]',
url = 'https://github.com/taeguk/github-export',
license='MIT',
description = "Export your github repositories.",
long_description = long_description,
packages=find_packages(),
scripts=['bin/github-export'],
include_package_data = True,
install_requires = ['requests', 'gitpython'],
classifiers = [
'Programming Language :: Python :: 3 :: Only',
],
keywords = 'github backup export repository',
)
| {
"content_hash": "b7c0087ace45dc71d14f69ba4cacdd95",
"timestamp": "",
"source": "github",
"line_count": 39,
"max_line_length": 64,
"avg_line_length": 24.666666666666668,
"alnum_prop": 0.6372141372141372,
"repo_name": "taeguk/github-export",
"id": "d757a7e2f266105a9629d6840d4053802c8f4cf3",
"size": "986",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "15508"
}
],
"symlink_target": ""
} |
import sys,os
import fileinput
import numpy
STEP_NUM=100
REP_NUM=100
START=0
END=0.05
SINGLETON=1
REDUN_RATIO_CUTOFF=0.2
def main():
if len(sys.argv) != 6:
mes = 'Usage: python {} <abundance.table> start end steps reps'
print >> sys.stderr, mes.format(os.path.basename(sys.argv[0]))
sys.exit(1)
table_file = sys.argv[1]
START=float(sys.argv[2])
END=float(sys.argv[3])
STEP_NUM=int(sys.argv[4])
REP_NUM=int(sys.argv[5])
# split at linear space
#subsample_rate_list = numpy.linspace(START, END, STEP_NUM)
# split at log space
if START == 0: # avoid log(0)
START = 0.0001
subsample_rate_list = numpy.logspace(
numpy.log10(START),
numpy.log10(END),
STEP_NUM,
base=10,
)
total_read_num = 0
redun_read_num = 0
counting_array_list = [numpy.zeros(REP_NUM)] * STEP_NUM
line_num = 0
for line in fileinput.input(table_file):
line_num += 1
if line.startswith('#'):
continue
line = line.rstrip()
#name, cnt = line.split('\t')[:2]
name, cnt = line.split()[:2]
cnt = int(cnt)
total_read_num += 1
if cnt > SINGLETON:
redun_read_num += 1
else:
continue
if total_read_num % 100000 ==0:
print >> sys.stderr, '{} reads processed..'.format(total_read_num)
ratio = redun_read_num*1.0/total_read_num
if ratio < REDUN_RATIO_CUTOFF:
_m = ('** Redundancy ratio {} falls below 20%, '
'more sequencing is needed for sequencing depth '
'estimation'
)
print >> sys.stderr, _m.format(ratio)
sys.exit(1)
        # iterate through the sample rates
for ind, p in enumerate(subsample_rate_list):
if cnt*p <=1:
continue
else:
# if redundant, update the array of a sample rate
ys = numpy.random.binomial(1, p, REP_NUM)
counting_array_list[ind] = counting_array_list[ind] + ys
subsample_num_list = subsample_rate_list * total_read_num
_temp_list = zip(subsample_num_list, counting_array_list)
for subsample_num, counting_array in _temp_list:
if subsample_num == 0:
assert sum(counting_array) == 0
redun_ratio_array = counting_array
else:
redun_ratio_array = counting_array/subsample_num
redun_ratio_array[redun_ratio_array > 1] = 1
mean = numpy.mean(redun_ratio_array)
std = numpy.std(redun_ratio_array)
q1, q2, q3 = numpy.percentile(redun_ratio_array, [25, 50, 75])
print '{}\t{}\t{}\t{}\t{}\t{}'.format(
subsample_num, mean, std, q1, q2, q3,
)
if __name__ == '__main__':
main()
| {
"content_hash": "478417fc4b71e33bc07904d753a6ef8b",
"timestamp": "",
"source": "github",
"line_count": 92,
"max_line_length": 78,
"avg_line_length": 33.26086956521739,
"alnum_prop": 0.5120915032679738,
"repo_name": "jiarong/seqdep",
"id": "3e8d4262ba54c39743077c269af238b3598d51a0",
"size": "3160",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "scripts/redunancy-curve.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "15179"
},
{
"name": "R",
"bytes": "8043"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import, division, print_function, unicode_literals
from . import tools_tags as tools
from .azure_common import BaseTest
from c7n_azure.actions.tagging import Tag
from mock import patch, Mock
from c7n.filters import FilterValidationError
class ActionsTagTest(BaseTest):
existing_tags = {'pre-existing-1': 'unmodified', 'pre-existing-2': 'unmodified'}
def _get_action(self, data):
return Tag(data=data, manager=Mock())
def test_schema_validate(self):
self.assertTrue(
self.load_policy(
tools.get_policy([
{'type': 'tag',
'tag': 'test',
'value': 'test_value'}
]),
validate=True))
self.assertTrue(
self.load_policy(
tools.get_policy([
{'type': 'tag',
'tags': {'tag1': 'test'}}
]),
validate=True))
self.assertTrue(self.load_policy({
'name': 'test-tag-schema-validate',
'resource': 'azure.vm',
'actions': [
{'type': 'tag',
'tag': {
'type': 'resource',
'key': 'name'
},
'value': {
'type': 'resource',
'key': 'name'
}},
]
}, validate=True))
with self.assertRaises(FilterValidationError):
# Can't have both tags and tag/value
self.load_policy(tools.get_policy([
{'type': 'tag',
'tags': {'tag2': 'value2'},
'tag': 'tag1',
'value': 'value1'}
]), validate=True)
with self.assertRaises(FilterValidationError):
# Required tags or tag/value
self.load_policy(tools.get_policy([
{'type': 'tag'}
]), validate=True)
with self.assertRaises(FilterValidationError):
# Empty tags
self.load_policy(tools.get_policy([
{'type': 'tag',
'tags': {}}
]), validate=True)
with self.assertRaises(FilterValidationError):
# Missing value
self.load_policy(tools.get_policy([
{'type': 'tag',
'tag': 'myTag'}
]), validate=True)
with self.assertRaises(FilterValidationError):
# Missing tag
self.load_policy(tools.get_policy([
{'type': 'tag',
'value': 'myValue'}
]), validate=True)
@patch('c7n_azure.tags.TagHelper.update_resource_tags')
def test_add_or_update_single_tag(self, update_resource_tags):
"""Verifies we can add a new tag to a VM and not modify
an existing tag on that resource
"""
action = self._get_action({'tag': 'tag1', 'value': 'value1'})
resource = tools.get_resource(self.existing_tags)
action.process([resource])
tags = tools.get_tags_parameter(update_resource_tags)
expected_tags = self.existing_tags.copy()
expected_tags.update({'tag1': 'value1'})
self.assertEqual(tags, expected_tags)
@patch('c7n_azure.tags.TagHelper.update_resource_tags')
def test_add_or_update_single_tag_from_resource(self, update_resource_tags):
"""Verifies we can add a new tag to a VM from values on the VM
"""
action = self._get_action(
{
'tag': {
'type': 'resource',
'key': 'name'
},
'value': {
'type': 'resource',
'key': 'type'
}
})
resource = tools.get_resource(self.existing_tags)
action.process([resource])
tags = tools.get_tags_parameter(update_resource_tags)
expected_tags = self.existing_tags.copy()
expected_tags.update({resource['name']: resource['type']})
self.assertEqual(tags, expected_tags)
@patch('c7n_azure.tags.TagHelper.update_resource_tags')
def test_add_or_update_single_tag_from_resource_default(self, update_resource_tags):
"""Verifies we can add a new tag to a VM from values on the VM
when values do not exist with default-value
"""
action = self._get_action(
{
'tag': {
'type': 'resource',
'key': 'doesnotexist',
'default-value': 'default_tag'
},
'value': {
'type': 'resource',
'key': 'doesnotexist',
'default-value': 'default_value'
}
})
resource = tools.get_resource(self.existing_tags)
action.process([resource])
tags = tools.get_tags_parameter(update_resource_tags)
expected_tags = self.existing_tags.copy()
expected_tags.update({'default_tag': 'default_value'})
self.assertEqual(tags, expected_tags)
@patch('c7n_azure.tags.TagHelper.update_resource_tags')
def test_add_or_update_tags(self, update_resource_tags):
"""Adds tags to an empty resource group, then updates one
tag and adds a new tag
"""
action = self._get_action({'tags': {'tag1': 'value1', 'pre-existing-1': 'modified'}})
resource = tools.get_resource(self.existing_tags)
action.process([resource])
tags = tools.get_tags_parameter(update_resource_tags)
expected_tags = self.existing_tags.copy()
expected_tags.update({'tag1': 'value1', 'pre-existing-1': 'modified'})
self.assertEqual(tags, expected_tags)
| {
"content_hash": "3a6b103b8ceca85b4afd3f02629b010c",
"timestamp": "",
"source": "github",
"line_count": 181,
"max_line_length": 93,
"avg_line_length": 32.25966850828729,
"alnum_prop": 0.5149854427127933,
"repo_name": "Sutto/cloud-custodian",
"id": "9d52626be01d339633cb733d4c1e65b353ae44bb",
"size": "6420",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "tools/c7n_azure/tests_azure/test_actions_tag.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "7986"
},
{
"name": "Go",
"bytes": "146630"
},
{
"name": "HTML",
"bytes": "31"
},
{
"name": "Makefile",
"bytes": "9971"
},
{
"name": "PowerShell",
"bytes": "1804"
},
{
"name": "Python",
"bytes": "5283859"
},
{
"name": "Shell",
"bytes": "12627"
},
{
"name": "Smarty",
"bytes": "359"
}
],
"symlink_target": ""
} |
'''
Copyright (C) 2014 Parrot SA
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions
are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in
the documentation and/or other materials provided with the
distribution.
* Neither the name of Parrot nor the names
of its contributors may be used to endorse or promote products
derived from this software without specific prior written
permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
SUCH DAMAGE.
'''
import sys
import os
import re
MYDIR=''
if __name__ == '__main__':
MYDIR=os.path.dirname(sys.argv[0])
if '' == MYDIR:
MYDIR='.'
WORKDIR=os.getcwd()
CONFDIR=''
if __name__ == '__main__':
CONFDIR=sys.argv[1]
#################################
# Get info from configure.ac #
# file #
#################################
LIB_NAME=''
LIB_MODULE=''
LIB_VERSION=''
if __name__ == '__main__':
configureAcFile = open(CONFDIR + '/configure.ac', 'rb')
AC_INIT_LINE=configureAcFile.readline().decode("utf-8")
while not AC_INIT_LINE.startswith('AC_INIT') and '' != AC_INIT_LINE:
AC_INIT_LINE=configureAcFile.readline().decode("utf-8")
if '' == AC_INIT_LINE:
ARPrint('Unable to read from configure.ac file !')
sys.exit(1)
AC_ARGS=re.findall(r'\[[^]]*\]', AC_INIT_LINE)
LIB_NAME=AC_ARGS[0].replace('[', '').replace(']', '')
LIB_MODULE=LIB_NAME.replace('lib', '')
LIB_VERSION=AC_ARGS[1].replace('[', '').replace(']', '')
#################################
# Generic infos about the lib #
#################################
# Directories
SRC_DIR = CONFDIR + '/../Sources/'
INC_DIR = CONFDIR + '/../Includes/' + LIB_NAME + '/'
BUILD_DIR = CONFDIR + '/'
JNI_C_DIR = CONFDIR + '/../JNI/c/'
JNI_JAVA_DIR = CONFDIR + '/../JNI/java/'
# Java/JNI package
JAVA_PACKAGE = 'com.parrot.arsdk.' + LIB_MODULE.lower()
JAVA_PACKAGE_DIR = JAVA_PACKAGE.replace('.', '/')
JAVA_OUT_DIR = JNI_JAVA_DIR + JAVA_PACKAGE_DIR + '/'
# Create directories if needed
if __name__ == '__main__':
if not os.path.exists(SRC_DIR):
os.makedirs(SRC_DIR)
if not os.path.exists(INC_DIR):
os.makedirs(INC_DIR)
if not os.path.exists(JNI_C_DIR):
os.makedirs(JNI_C_DIR)
if not os.path.exists(JNI_JAVA_DIR):
os.makedirs(JNI_JAVA_DIR)
if not os.path.exists(JAVA_OUT_DIR):
os.makedirs(JAVA_OUT_DIR)
# Generated file disclaimer
GENERATED_FILE_DISCLAIMER='''
/*
* GENERATED FILE
* Do not modify this file, it will be erased during the next configure run
*/
'''
#################################
# Internal functions #
#################################
def ARPrint(msg, noNewLine=0):
sys.stdout.write(msg)
if 0 == noNewLine:
sys.stdout.write('\n')
else:
sys.stdout.write(' ')
def ARCapitalize(arstr):
return arstr[0].upper() + arstr[1:]
def ARStringIsInteger(arstr):
try:
int(arstr)
return True
except ValueError:
return False
#################################
# Add Prebuild processing here #
#################################
#################################
# Generate JAVA Enums from C #
# Enums #
#################################
class AREnumType:
def __init__(self):
self.name = ''
self.entries = []
self.hasToString = False
self.toStringFileName = ''
self.toStringPrototype = ''
def addEntry(self, entry):
self.entries.append(entry)
def setName(self, name):
self.name = name
def setToStringFileName(self, fname, prototype):
self.hasToString = True
self.toStringFileName = fname
self.toStringPrototype = prototype
class AREnumEntry:
"Represent an enum entry in C"
def __init__(self, name, value, comment):
self.name = name
self.value = value
self.comment = comment
def readEnumEntriesFromFile(filename):
ALL_LINES = [line.strip() for line in open(filename)]
DATA_LINES = []
# Strip empty lines
for line in ALL_LINES:
if line:
DATA_LINES.append(line)
# Loop over lines to find enums
currentEnumType = AREnumType()
allEnums = []
foundEnum = False
noEnumInThisFile = False
previousValue = -1 # So next value is zero
for line in DATA_LINES:
if line == '// ARSDK_NO_ENUM_PREPROCESS //':
noEnumInThisFile = True
break
if not foundEnum:
if line.startswith('typedef enum'):
foundEnum = True
previousValue = -1
elif 'ToString' in line:
_, _, enumName = line.partition('(')
enumName, _, _ = enumName.partition(' ')
for enum in allEnums:
if enum.name == enumName:
cFileName = os.path.dirname(filename) + '/' + os.path.basename(filename).replace('.h', '.c')
cFileName = cFileName.replace(INC_DIR, SRC_DIR)
prototype = line.rstrip(';')
enum.setToStringFileName(cFileName, prototype)
break
else:
if line.startswith('}'):
foundEnum = False
enumName, _, _ = line.partition(';')
_, _, enumName = enumName.partition('}')
enumName = enumName.strip()
currentEnumType.setName(enumName)
allEnums.append(currentEnumType)
currentEnumType = AREnumType()
elif not line.startswith('{'):
                if re.match(r'[ \t]*[/*]', line) is not None:  # skip comment lines
continue
# Get name
name, _, _ = line.partition(' ')
name = name.strip(',')
# Get value
_, _, value = line.partition('=')
value, _, _ = value.partition(',')
value = value.strip()
if not value:
value = str(previousValue + 1)
while not ARStringIsInteger(value):
for prevEntry in currentEnumType.entries:
if prevEntry.name == value:
value = prevEntry.value
break
break
previousValue = int(value)
# Get comment
_, _, comment = line.partition('/**<')
comment, _, _ = comment.partition('*/')
comment = comment.strip()
# If the comment is not in /**< */ format, try ///< format
if comment == '':
_, _, comment = line.partition('///<')
comment = comment.strip()
entry = AREnumEntry(name, value, comment)
currentEnumType.addEntry(entry)
if noEnumInThisFile:
return []
else:
return allEnums
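# The parser above makes some assumptions about the C headers it reads; the
# snippet below is a hypothetical header fragment (not taken from a real ARSDK
# file) showing the shape it expects: a 'typedef enum' block whose entries may
# carry explicit values and /**< */ or ///< comments, plus an optional ToString
# prototype (declared after the enum) that triggers generation of the matching
# .c implementation:
#
#   typedef enum
#   {
#       ARSAMPLE_ERROR_OK = 0,          /**< No error */
#       ARSAMPLE_ERROR_GENERIC,         ///< Generic error
#   } eARSAMPLE_ERROR;
#
#   const char* ARSAMPLE_Error_ToString (eARSAMPLE_ERROR error);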
def entryConstructor(entry, last=False):
retVal = ' '
if entry.comment != '':
retVal += '/** ' + entry.comment + ' */\n '
if entry.comment == '':
retVal += entry.name + ' (' + entry.value + ')'
else:
retVal += entry.name + ' (' + entry.value + ', "' + entry.comment + '")'
if last:
retVal += ';'
else:
retVal += ','
retVal += '\n'
return retVal
def writeEnumToJavaFile(enumType):
CLASS_NAME = enumType.name.lstrip('e') + '_ENUM'
JFILE_NAME = JAVA_OUT_DIR + CLASS_NAME + '.java'
jfile = open(JFILE_NAME, 'w')
jfile.write(GENERATED_FILE_DISCLAIMER)
jfile.write('\n')
jfile.write('package ' + JAVA_PACKAGE + ';\n')
jfile.write('\n')
jfile.write('import java.util.HashMap;\n')
jfile.write('\n')
jfile.write('/**\n')
jfile.write(' * Java copy of the ' + enumType.name + ' enum\n')
jfile.write(' */\n')
jfile.write('public enum ' + CLASS_NAME + ' {\n')
unknownEnumEntry = AREnumEntry(enumType.name + "_UNKNOWN_ENUM_VALUE", "Integer.MIN_VALUE", "Dummy value for all unknown cases")
jfile.write(entryConstructor(unknownEnumEntry))
for entry in enumType.entries[:-1]:
jfile.write(entryConstructor(entry))
entry = enumType.entries[-1]
jfile.write(entryConstructor(entry, True))
jfile.write('\n')
jfile.write(' private final int value;\n')
jfile.write(' private final String comment;\n');
jfile.write(' static HashMap<Integer, ' + CLASS_NAME + '> valuesList;\n')
jfile.write('\n')
jfile.write(' ' + CLASS_NAME + ' (int value) {\n')
jfile.write(' this.value = value;\n')
jfile.write(' this.comment = null;\n')
jfile.write(' }\n')
jfile.write('\n')
jfile.write(' ' + CLASS_NAME + ' (int value, String comment) {\n')
jfile.write(' this.value = value;\n')
jfile.write(' this.comment = comment;\n')
jfile.write(' }\n')
jfile.write('\n')
jfile.write(' /**\n')
jfile.write(' * Gets the int value of the enum\n')
jfile.write(' * @return int value of the enum\n')
jfile.write(' */\n')
jfile.write(' public int getValue () {\n')
jfile.write(' return value;\n')
jfile.write(' }\n')
jfile.write('\n')
jfile.write(' /**\n')
jfile.write(' * Gets the ' + CLASS_NAME + ' instance from a C enum value\n')
jfile.write(' * @param value C value of the enum\n')
jfile.write(' * @return The ' + CLASS_NAME + ' instance, or null if the C enum value was not valid\n')
jfile.write(' */\n')
jfile.write(' public static ' + CLASS_NAME + ' getFromValue (int value) {\n')
jfile.write(' if (null == valuesList) {\n')
jfile.write(' ' + CLASS_NAME + ' [] valuesArray = ' + CLASS_NAME + '.values ();\n')
jfile.write(' valuesList = new HashMap<Integer, ' + CLASS_NAME + '> (valuesArray.length);\n')
jfile.write(' for (' + CLASS_NAME + ' entry : valuesArray) {\n')
jfile.write(' valuesList.put (entry.getValue (), entry);\n')
jfile.write(' }\n')
jfile.write(' }\n')
jfile.write(' ' + CLASS_NAME + ' retVal = valuesList.get (value);\n')
jfile.write(' if (retVal == null) {\n')
jfile.write(' retVal = ' + unknownEnumEntry.name + ';\n')
jfile.write(' }\n')
    jfile.write('        return retVal;\n')
jfile.write(' }\n')
jfile.write('\n')
jfile.write(' /**\n')
jfile.write(' * Returns the enum comment as a description string\n')
jfile.write(' * @return The enum description\n')
jfile.write(' */\n')
jfile.write(' public String toString () {\n')
jfile.write(' if (this.comment != null) {\n')
jfile.write(' return this.comment;\n')
jfile.write(' }\n')
jfile.write(' return super.toString ();\n')
jfile.write(' }\n')
jfile.write('}\n')
jfile.close()
def writeToStringFunction(enumType):
if not enumType.hasToString:
return
CNAME = os.path.basename(enumType.toStringFileName)
HNAME = LIB_NAME + '/' + CNAME.replace('.c', '.h')
VARNAME, _, _ = enumType.toStringPrototype.partition(')')
_, _, VARNAME = VARNAME.partition(enumType.name + ' ')
cfile = open(enumType.toStringFileName, 'w')
cfile.write(GENERATED_FILE_DISCLAIMER)
cfile.write('\n')
cfile.write('/**\n')
cfile.write(' * @file ' + CNAME + '\n')
cfile.write(' * @brief ToString function for ' + enumType.name + ' enum\n')
cfile.write(' */\n')
cfile.write('\n')
cfile.write('#include <' + HNAME + '>\n')
cfile.write('\n')
cfile.write(enumType.toStringPrototype + '\n')
cfile.write('{\n')
cfile.write(' switch (' + VARNAME + ')\n')
cfile.write(' {\n')
for entry in enumType.entries:
cfile.write(' case ' + entry.name + ':\n')
cfile.write(' return "' + entry.comment + '";\n')
cfile.write(' break;\n')
cfile.write(' default:\n')
cfile.write(' return "Unknown value";\n')
cfile.write(' break;\n')
cfile.write(' }\n')
cfile.write(' return "Unknown value";\n')
cfile.write('}\n')
cfile.close()
def writeJavaEnumFileFormHeaderFile(headerFile, javaOutDir, javaPackage):
global JNI_JAVA_DIR
global JAVA_PACKAGE
global JAVA_PACKAGE_DIR
global JAVA_OUT_DIR
JNI_JAVA_DIR=javaOutDir
JAVA_PACKAGE = javaPackage
JAVA_PACKAGE_DIR = JAVA_PACKAGE.replace('.', '/')
JAVA_OUT_DIR = JNI_JAVA_DIR + "/" + JAVA_PACKAGE_DIR + '/'
if not os.path.exists(JAVA_OUT_DIR):
os.makedirs(JAVA_OUT_DIR)
allEnums = readEnumEntriesFromFile(headerFile)
for enumType in allEnums:
writeEnumToJavaFile(enumType)
writeToStringFunction(enumType)
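# A hypothetical usage sketch (paths and package name are made up) for build
# scripts that import this module instead of running it as __main__:
#
#   writeJavaEnumFileFormHeaderFile('Includes/libARSample/ARSAMPLE_Error.h',
#                                   'JNI/java',
#                                   'com.parrot.arsdk.arsample')
#
# This writes the generated *_ENUM.java classes under
# JNI/java/com/parrot/arsdk/arsample/ and, for enums that declare a ToString
# prototype, rewrites the corresponding ToString .c file recorded while
# parsing the header.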
#################################
# Generate JAVA Enums from C #
# Main #
#################################
if __name__ == '__main__':
for fname in os.listdir(INC_DIR):
if fname.endswith('.h'):
completeFile = INC_DIR + fname
allEnums = readEnumEntriesFromFile(completeFile)
for enumType in allEnums:
writeEnumToJavaFile(enumType)
writeToStringFunction(enumType)
#END
| {
"content_hash": "a61e1e57a22d09cdb25e7b39984e171b",
"timestamp": "",
"source": "github",
"line_count": 393,
"max_line_length": 131,
"avg_line_length": 36.76590330788804,
"alnum_prop": 0.5523565644681293,
"repo_name": "cvramire/ARSDKBuildUtils",
"id": "ab3b577938276b81c5514feb02e85c213b0ddba1",
"size": "16011",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "Utils/Python/ARSDK_PrebuildActions.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "242456"
},
{
"name": "Shell",
"bytes": "23045"
}
],
"symlink_target": ""
} |
from SimpleCV import Camera
from datetime import datetime
from io import BytesIO
DEFAULT_PICTURE_PATH = "/tmp/"
try:
web_camera = Camera()
web_camera.getImage()
print('webcam Init.')
except Exception:
web_camera = None
def take_picture_web():
file_stream = BytesIO()
img_raw = web_camera.getImage().getPIL()
img_raw.save(file_stream, format='jpeg')
return file_stream.getvalue()
if __name__ == "__main__":
raw = take_picture_web()
file_name = datetime.now().strftime("%Y%m%d%H%M%S") + ".jpg"
with open(DEFAULT_PICTURE_PATH + file_name, 'wb') as f:
f.write(raw)
print('Take Picture by Webcam. Save to {}'.format(DEFAULT_PICTURE_PATH))
| {
"content_hash": "806e3c68184734110ec20c2458c11d58",
"timestamp": "",
"source": "github",
"line_count": 27,
"max_line_length": 76,
"avg_line_length": 25.40740740740741,
"alnum_prop": 0.6428571428571429,
"repo_name": "farmy-maker/farmy-py",
"id": "91e3741ffce6e2314b06476ac652122235b9c545",
"size": "704",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "farmy/modules/camera/webcam.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "12080"
}
],
"symlink_target": ""
} |
import datetime
from django.template import Context
from django.template import Template
from django.test import TestCase
from django.utils import timezone
from content.models import Event
from content.models import Message
from content.models import Post
from content.models import Venue
class EventMethodTests(TestCase):
def test_is_upcoming_with_future_date(self):
"""
is_upcoming() should return true if the date is in the future.
"""
time = timezone.now() + datetime.timedelta(days=30)
upcoming_event = Event(event_date=time)
self.assertEqual(upcoming_event.is_upcoming(), True)
def test_is_upcoming_with_past_date(self):
"""
is_upcoming() should return false if the date is in the past.
"""
time = timezone.now() + datetime.timedelta(days=-30)
upcoming_event = Event(event_date=time)
self.assertEqual(upcoming_event.is_upcoming(), False)
class PostViewTests(TestCase):
def test_index_view_with_no_posts(self):
"""
Test output when there are no posts.
"""
response = self.client.get('/')
self.assertEqual(response.status_code, 200)
self.assertContains(response, 'No posts to display.')
self.assertQuerysetEqual(response.context['latest_post_list'], [])
def test_index_view_with_new_post(self):
"""
Test output with a new post.
"""
create_post('NEW POST')
response = self.client.get('/')
self.assertEqual(response.status_code, 200)
self.assertContains(response, 'NEW POST')
post_text = '<Post: NEW POST - ' + timezone.now().strftime('%d/%m/%y') + '>'
self.assertQuerysetEqual(response.context['latest_post_list'], [post_text])
class EventViewTests(TestCase):
def test_view_with_no_posts(self):
"""
Test output when there are no events.
"""
response = self.client.get('/events')
self.assertEqual(response.status_code, 200)
self.assertContains(response, 'No events to display.')
self.assertQuerysetEqual(response.context['event_list'], [])
class MessageFormTests(TestCase):
def test_creation_of_new_message(self):
"""
Test that sent messages are saved to db.
"""
response = self.client.get('/contact')
self.assertEqual(response.status_code, 200)
post_data = {
'msg_author': '[email protected]',
'msg_subject': 'test',
'msg_text': 'testing message creation',
}
response = self.client.post('/contact', post_data)
self.assertEqual(response.status_code, 302)
self.assertTrue(response.get('location').endswith('/contact/thanks'))
m = Message.objects.last()
self.assertEqual(m.msg_author, '[email protected]')
self.assertEqual(m.msg_subject, 'test')
self.assertEqual(m.msg_text, 'testing message creation')
class EventSidebarTests(TestCase):
def test_with_no_events(self):
"""
Test output when there are no events.
"""
        rendered = render_template('{% load content_extras %}{% show_upcoming_events %}')
self.assertIsNotNone(rendered)
        self.assertIn('No events to display.', rendered)
def test_with_single_event(self):
"""
Test output with a single event.
"""
test_date = timezone.now()
test_venue = create_venue(name='Lost nightclub', address='unknown')
create_event(title='Big Event', venue=test_venue, date=test_date, desc='')
        rendered = render_template('{% load content_extras %}{% show_upcoming_events %}')
self.assertIsNotNone(rendered)
        self.assertIn('Big Event', rendered)
        self.assertIn(test_date.strftime('%d/%m/%y'), rendered)
def create_event(title, venue, date, desc=''):
"""
Creates a new event.
"""
return Event.objects.create(event_title=title, event_description=desc,
event_venue=venue, event_date=date)
def create_post(title, desc=''):
"""
Creates a new post.
"""
return Post.objects.create(post_title=title, post_description=desc)
def create_message(author, subject, text):
"""
Creates a new message.
"""
return Message.objects.create(msg_author=author, msg_subject=subject, msg_text=text)
def create_venue(name, address):
"""
Creates a new venue.
"""
return Venue.objects.create(venue_name=name, venue_address=address)
def render_template(string, context=None):
if context is None:
context = {}
context = Context(context)
return Template(string).render(context)
| {
"content_hash": "4c5379567e86ae23a136758aeeb9e3a3",
"timestamp": "",
"source": "github",
"line_count": 147,
"max_line_length": 93,
"avg_line_length": 32.02040816326531,
"alnum_prop": 0.6301253452305078,
"repo_name": "sfowl/fowllanguage",
"id": "bda40fece718fa99f81151bf57054d3d8ffcfb6f",
"size": "4707",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "content/tests.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "23132"
},
{
"name": "HTML",
"bytes": "7043"
},
{
"name": "JavaScript",
"bytes": "1128"
},
{
"name": "Python",
"bytes": "35494"
}
],
"symlink_target": ""
} |
{
'name': 'Repairs Management',
'version': '1.0',
'category': 'Manufacturing',
'description': """
The aim is to have a complete module to manage all product repairs.
====================================================================
The following topics should be covered by this module:
------------------------------------------------------
    * Add/remove products in the repair order
    * Impact on stock
* Invoicing (products and/or services)
* Warranty concept
* Repair quotation report
* Notes for the technician and for the final customer
""",
'author': 'OpenERP SA',
'images': ['images/repair_order.jpeg'],
'depends': ['mrp', 'sale', 'account'],
'data': [
'security/ir.model.access.csv',
'security/mrp_repair_security.xml',
'mrp_repair_data.xml',
'mrp_repair_sequence.xml',
'wizard/mrp_repair_cancel_view.xml',
'wizard/mrp_repair_make_invoice_view.xml',
'mrp_repair_view.xml',
'mrp_repair_workflow.xml',
'mrp_repair_report.xml',
],
'demo': ['mrp_repair_demo.yml'],
'test': ['test/test_mrp_repair_noneinv.yml',
'test/test_mrp_repair_b4inv.yml',
'test/test_mrp_repair_afterinv.yml',
'test/test_mrp_repair_cancel.yml',
'test/mrp_repair_report.yml',
'test/test_mrp_repair_fee.yml',
],
'installable': True,
'auto_install': False,
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| {
"content_hash": "4c971ee4497bb477a09b8847a5a56dae",
"timestamp": "",
"source": "github",
"line_count": 43,
"max_line_length": 68,
"avg_line_length": 35.27906976744186,
"alnum_prop": 0.5589980224126566,
"repo_name": "ntiufalara/openerp7",
"id": "6f324c43f7a708ce0497952564215529a7debab9",
"size": "2497",
"binary": false,
"copies": "34",
"ref": "refs/heads/master",
"path": "openerp/addons/mrp_repair/__openerp__.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "9611"
},
{
"name": "C#",
"bytes": "93691"
},
{
"name": "C++",
"bytes": "108790"
},
{
"name": "CSS",
"bytes": "583265"
},
{
"name": "Groff",
"bytes": "8138"
},
{
"name": "HTML",
"bytes": "125159"
},
{
"name": "JavaScript",
"bytes": "5109152"
},
{
"name": "Makefile",
"bytes": "14036"
},
{
"name": "NSIS",
"bytes": "14114"
},
{
"name": "PHP",
"bytes": "14033"
},
{
"name": "Python",
"bytes": "9373763"
},
{
"name": "Ruby",
"bytes": "220"
},
{
"name": "Shell",
"bytes": "6430"
},
{
"name": "XSLT",
"bytes": "156761"
}
],
"symlink_target": ""
} |
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='UserGroup',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('slug', models.SlugField(unique=True)),
('name', models.CharField(max_length=255, unique=True)),
('webpage_url', models.URLField(blank=True, max_length=255)),
('meetup_url', models.URLField(blank=True, max_length=255)),
('image', models.FileField(blank=True, max_length=255, null=True, upload_to=b'uploads/usergroups/')),
('is_active', models.BooleanField(default=True)),
],
options={
'abstract': False,
},
),
]
| {
"content_hash": "959e49aa452e0c0d3cea261193fca394",
"timestamp": "",
"source": "github",
"line_count": 27,
"max_line_length": 117,
"avg_line_length": 34.111111111111114,
"alnum_prop": 0.5472312703583062,
"repo_name": "WebCampZg/conference-web",
"id": "fd28436a2ba557cc04ff0898c2ac5c595cff46f0",
"size": "995",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "usergroups/migrations/0001_initial.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "HTML",
"bytes": "131971"
},
{
"name": "JavaScript",
"bytes": "3928"
},
{
"name": "Makefile",
"bytes": "1944"
},
{
"name": "Python",
"bytes": "268738"
},
{
"name": "SCSS",
"bytes": "41619"
}
],
"symlink_target": ""
} |
from wdim.server.api.v1 import auth
from wdim.server.api.v1 import document
PREFIX = 'v1'
HANDLERS = (
auth.AuthHandler.as_entry(),
document.HistoryHandler.as_entry(),
document.DocumentHandler.as_entry(),
document.DocumentsHandler.as_entry(),
)
| {
"content_hash": "494ea3b6188e314ae6a57bc5de17a151",
"timestamp": "",
"source": "github",
"line_count": 10,
"max_line_length": 41,
"avg_line_length": 26.2,
"alnum_prop": 0.7251908396946565,
"repo_name": "chrisseto/Still",
"id": "cd922b97d70ec327d06cde0f2b9e410ff731f6e8",
"size": "262",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "wdim/server/api/v1/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "57236"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import, print_function
__author__ = 'katharine'
import atexit
import logging
import math
import os
import sys
import threading
import time
import requests
from pebble_tool.version import __version__
from pebble_tool.sdk import sdk_manager
from pebble_tool.util.config import config
from pebble_tool.util.versions import version_to_key
logger = logging.getLogger("pebble_tool.util.updates")
class UpdateChecker(threading.Thread):
def __init__(self, component, current_version, callback):
self.component = component
self.current_version = current_version
self.callback = callback
super(UpdateChecker, self).__init__()
self.daemon = True
self.start()
def run(self):
last_check = config.get('update-checks', {}).get(self.component, {})
if last_check.get('timestamp', 0) < time.time() - 86400: # minus one day
logger.debug("Haven't looked for updates lately; checking...")
try:
latest = sdk_manager.request("/v1/files/{}/latest?channel={}"
.format(self.component, sdk_manager.get_channel()))
except requests.RequestException as e:
logger.info("Update check failed: %s", e)
return
if not 200 <= latest.status_code < 400:
logger.info("Update check failed: %s (%s)", latest.status_code, latest.reason)
return
result = latest.json()
with config.lock:
config.setdefault('update-checks', {})[self.component] = {
'timestamp': time.time(),
'version': result['version'],
'release_notes': result.get('release_notes', None)
}
self._check_version(result['version'], result.get('release_notes', None))
else:
self._check_version(last_check['version'], last_check.get('release_notes', None))
def _check_version(self, new_version, release_notes=None):
if version_to_key(new_version) > version_to_key(self.current_version):
logger.debug("Found an update: %s", new_version)
atexit.register(self.callback, new_version, release_notes)
def _print(*args, **kwargs):
print(*args, file=sys.stderr, **kwargs)
def _handle_sdk_update(version, release_notes=None):
# We know the SDK was new when the version check occurred, but it is possible that it's
# been installed since then. Therefore, check again.
if version not in sdk_manager.list_local_sdk_versions():
_print()
_print("A new SDK, version {0}, is available! Run `pebble sdk install {0}` to get it.".format(version))
if release_notes is not None:
_print(release_notes)
def _handle_tool_update(version, release_notes=None):
_print()
_print("An updated pebble tool, version {}, is available.".format(version))
if release_notes is not None:
_print(release_notes)
if 'PEBBLE_IS_HOMEBREW' in os.environ:
_print("Run `brew update && brew upgrade pebble-sdk` to get it.")
else:
_print("Head to https://developer.getpebble.com/sdk/beta/ to get it.")
def _get_platform():
sys_platform = sys.platform.rstrip('2') # "linux2" on python < 3.3...
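    # e.g. 'linux64' or 'darwin64': platform name plus the pointer width in bits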
return sys_platform + str(int(round(math.log(sys.maxsize, 2)+1)))
def wait_for_update_checks(timeout):
now = time.time()
end = now + timeout
for checker in _checkers:
now = time.time()
if now > end:
break
checker.join(end - time.time())
_checkers = []
def _do_updates():
_checkers.append(UpdateChecker("pebble-tool-{}".format(_get_platform()), __version__, _handle_tool_update))
# Only do the SDK update check if there is actually an SDK installed.
if sdk_manager.get_current_sdk() is not None:
try:
latest_sdk = max(sdk_manager.list_local_sdk_versions(), key=version_to_key)
except ValueError:
latest_sdk = "0"
_checkers.append(UpdateChecker("sdk-core", latest_sdk, _handle_sdk_update))
_do_updates()
| {
"content_hash": "c8ab4e46ff91ca773d8362c5bf29a7d6",
"timestamp": "",
"source": "github",
"line_count": 113,
"max_line_length": 111,
"avg_line_length": 36.796460176991154,
"alnum_prop": 0.6168831168831169,
"repo_name": "gregoiresage/pebble-tool",
"id": "95fb19b5f9683a47ae2a924108d377dd91b499c9",
"size": "4158",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pebble_tool/util/updates.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "1864"
},
{
"name": "CSS",
"bytes": "2938"
},
{
"name": "JavaScript",
"bytes": "8517"
},
{
"name": "Python",
"bytes": "160331"
},
{
"name": "Shell",
"bytes": "269"
}
],
"symlink_target": ""
} |
'''
Author:
Description:
'''
from core.broadcast import say, bang
from core.config.settings import logger
import sys
class Reaction:
"""class Reaction"""
response = ''
request = ''
def __str__(self):
return 'My new reaction'
    def __init__(self, *args, **kwargs):
""" original request string """
#logger.info(args)
#logger.info(kwargs)
#logger.info(kwargs.get('req_obj'))
#get request object
self.req_obj = kwargs.pop('req_obj')
#request word sequence
self.request = self.req_obj.get('request', '')
#request received from (julius, jabber any other resources)
self.req_from = self.req_obj.get('from', '')
self.response = ''
    def run(self):
"""default method"""
response = "\nRaspberry PI type B"
response += "\nPlatform: %s" % sys.platform
response += "\nPython: %s" % sys.version
if self.req_from == 'jabber':
todo = { 'text' : response, 'jmsg' : response, 'type': 'response' }
self.response = todo
if self.req_from == 'julius':
bang()
todo = { 'say': response , 'text' : response ,'type': 'response' }
self.response = say(self.request.replace('say', '').upper())
return self.response
| {
"content_hash": "93d56eeb4861fc7a14c7e67ccac760b0",
"timestamp": "",
"source": "github",
"line_count": 52,
"max_line_length": 79,
"avg_line_length": 26.25,
"alnum_prop": 0.5545787545787546,
"repo_name": "vsilent/smarty-bot",
"id": "88add2920ef71d5d3f67452d2263b7a5b13c59eb",
"size": "3954",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "core/brain/show/me/system/info/reaction.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "2163"
},
{
"name": "Dockerfile",
"bytes": "741"
},
{
"name": "HTML",
"bytes": "4223"
},
{
"name": "Mako",
"bytes": "412"
},
{
"name": "Python",
"bytes": "974421"
},
{
"name": "Shell",
"bytes": "556"
}
],
"symlink_target": ""
} |
import unittest
from typing import Any, Dict, List
from unittest import mock
from google.cloud.pubsub_v1.types import ReceivedMessage
from airflow.providers.google.cloud.operators.pubsub import (
PubSubCreateSubscriptionOperator,
PubSubCreateTopicOperator,
PubSubDeleteSubscriptionOperator,
PubSubDeleteTopicOperator,
PubSubPublishMessageOperator,
PubSubPullOperator,
)
TASK_ID = 'test-task-id'
TEST_PROJECT = 'test-project'
TEST_TOPIC = 'test-topic'
TEST_SUBSCRIPTION = 'test-subscription'
TEST_MESSAGES = [
{'data': b'Hello, World!', 'attributes': {'type': 'greeting'}},
{'data': b'Knock, knock'},
{'attributes': {'foo': ''}},
]
class TestPubSubTopicCreateOperator(unittest.TestCase):
@mock.patch('airflow.providers.google.cloud.operators.pubsub.PubSubHook')
def test_failifexists(self, mock_hook):
operator = PubSubCreateTopicOperator(
task_id=TASK_ID, project_id=TEST_PROJECT, topic=TEST_TOPIC, fail_if_exists=True
)
operator.execute(None)
mock_hook.return_value.create_topic.assert_called_once_with(
project_id=TEST_PROJECT,
topic=TEST_TOPIC,
fail_if_exists=True,
labels=None,
message_storage_policy=None,
kms_key_name=None,
retry=None,
timeout=None,
metadata=None,
)
@mock.patch('airflow.providers.google.cloud.operators.pubsub.PubSubHook')
def test_succeedifexists(self, mock_hook):
operator = PubSubCreateTopicOperator(
task_id=TASK_ID, project_id=TEST_PROJECT, topic=TEST_TOPIC, fail_if_exists=False
)
operator.execute(None)
mock_hook.return_value.create_topic.assert_called_once_with(
project_id=TEST_PROJECT,
topic=TEST_TOPIC,
fail_if_exists=False,
labels=None,
message_storage_policy=None,
kms_key_name=None,
retry=None,
timeout=None,
metadata=None,
)
class TestPubSubTopicDeleteOperator(unittest.TestCase):
@mock.patch('airflow.providers.google.cloud.operators.pubsub.PubSubHook')
def test_execute(self, mock_hook):
operator = PubSubDeleteTopicOperator(task_id=TASK_ID, project_id=TEST_PROJECT, topic=TEST_TOPIC)
operator.execute(None)
mock_hook.return_value.delete_topic.assert_called_once_with(
project_id=TEST_PROJECT,
topic=TEST_TOPIC,
fail_if_not_exists=False,
retry=None,
timeout=None,
metadata=None,
)
class TestPubSubSubscriptionCreateOperator(unittest.TestCase):
@mock.patch('airflow.providers.google.cloud.operators.pubsub.PubSubHook')
def test_execute(self, mock_hook):
operator = PubSubCreateSubscriptionOperator(
task_id=TASK_ID, project_id=TEST_PROJECT, topic=TEST_TOPIC, subscription=TEST_SUBSCRIPTION
)
mock_hook.return_value.create_subscription.return_value = TEST_SUBSCRIPTION
response = operator.execute(None)
mock_hook.return_value.create_subscription.assert_called_once_with(
project_id=TEST_PROJECT,
topic=TEST_TOPIC,
subscription=TEST_SUBSCRIPTION,
subscription_project_id=None,
ack_deadline_secs=10,
fail_if_exists=False,
push_config=None,
retain_acked_messages=None,
message_retention_duration=None,
labels=None,
enable_message_ordering=False,
expiration_policy=None,
filter_=None,
dead_letter_policy=None,
retry_policy=None,
retry=None,
timeout=None,
metadata=None,
)
assert response == TEST_SUBSCRIPTION
@mock.patch('airflow.providers.google.cloud.operators.pubsub.PubSubHook')
def test_execute_different_project_ids(self, mock_hook):
another_project = 'another-project'
operator = PubSubCreateSubscriptionOperator(
project_id=TEST_PROJECT,
topic=TEST_TOPIC,
subscription=TEST_SUBSCRIPTION,
subscription_project_id=another_project,
task_id=TASK_ID,
)
mock_hook.return_value.create_subscription.return_value = TEST_SUBSCRIPTION
response = operator.execute(None)
mock_hook.return_value.create_subscription.assert_called_once_with(
project_id=TEST_PROJECT,
topic=TEST_TOPIC,
subscription=TEST_SUBSCRIPTION,
subscription_project_id=another_project,
ack_deadline_secs=10,
fail_if_exists=False,
push_config=None,
retain_acked_messages=None,
message_retention_duration=None,
labels=None,
enable_message_ordering=False,
expiration_policy=None,
filter_=None,
dead_letter_policy=None,
retry_policy=None,
retry=None,
timeout=None,
metadata=None,
)
assert response == TEST_SUBSCRIPTION
@mock.patch('airflow.providers.google.cloud.operators.pubsub.PubSubHook')
def test_execute_no_subscription(self, mock_hook):
operator = PubSubCreateSubscriptionOperator(
task_id=TASK_ID, project_id=TEST_PROJECT, topic=TEST_TOPIC
)
mock_hook.return_value.create_subscription.return_value = TEST_SUBSCRIPTION
response = operator.execute(None)
mock_hook.return_value.create_subscription.assert_called_once_with(
project_id=TEST_PROJECT,
topic=TEST_TOPIC,
subscription=None,
subscription_project_id=None,
ack_deadline_secs=10,
fail_if_exists=False,
push_config=None,
retain_acked_messages=None,
message_retention_duration=None,
labels=None,
enable_message_ordering=False,
expiration_policy=None,
filter_=None,
dead_letter_policy=None,
retry_policy=None,
retry=None,
timeout=None,
metadata=None,
)
assert response == TEST_SUBSCRIPTION
class TestPubSubSubscriptionDeleteOperator(unittest.TestCase):
@mock.patch('airflow.providers.google.cloud.operators.pubsub.PubSubHook')
def test_execute(self, mock_hook):
operator = PubSubDeleteSubscriptionOperator(
task_id=TASK_ID, project_id=TEST_PROJECT, subscription=TEST_SUBSCRIPTION
)
operator.execute(None)
mock_hook.return_value.delete_subscription.assert_called_once_with(
project_id=TEST_PROJECT,
subscription=TEST_SUBSCRIPTION,
fail_if_not_exists=False,
retry=None,
timeout=None,
metadata=None,
)
class TestPubSubPublishOperator(unittest.TestCase):
@mock.patch('airflow.providers.google.cloud.operators.pubsub.PubSubHook')
def test_publish(self, mock_hook):
operator = PubSubPublishMessageOperator(
task_id=TASK_ID,
project_id=TEST_PROJECT,
topic=TEST_TOPIC,
messages=TEST_MESSAGES,
)
operator.execute(None)
mock_hook.return_value.publish.assert_called_once_with(
project_id=TEST_PROJECT, topic=TEST_TOPIC, messages=TEST_MESSAGES
)
class TestPubSubPullOperator(unittest.TestCase):
def _generate_messages(self, count):
return [
ReceivedMessage(
ack_id=f"{i}",
message={
"data": f'Message {i}'.encode(),
"attributes": {"type": "generated message"},
},
)
for i in range(1, count + 1)
]
def _generate_dicts(self, count):
return [ReceivedMessage.to_dict(m) for m in self._generate_messages(count)]
@mock.patch('airflow.providers.google.cloud.operators.pubsub.PubSubHook')
def test_execute_no_messages(self, mock_hook):
operator = PubSubPullOperator(
task_id=TASK_ID,
project_id=TEST_PROJECT,
subscription=TEST_SUBSCRIPTION,
)
mock_hook.return_value.pull.return_value = []
assert [] == operator.execute({})
@mock.patch('airflow.providers.google.cloud.operators.pubsub.PubSubHook')
def test_execute_with_ack_messages(self, mock_hook):
operator = PubSubPullOperator(
task_id=TASK_ID,
project_id=TEST_PROJECT,
subscription=TEST_SUBSCRIPTION,
ack_messages=True,
)
generated_messages = self._generate_messages(5)
generated_dicts = self._generate_dicts(5)
mock_hook.return_value.pull.return_value = generated_messages
assert generated_dicts == operator.execute({})
mock_hook.return_value.acknowledge.assert_called_once_with(
project_id=TEST_PROJECT,
subscription=TEST_SUBSCRIPTION,
messages=generated_messages,
)
@mock.patch('airflow.providers.google.cloud.operators.pubsub.PubSubHook')
def test_execute_with_messages_callback(self, mock_hook):
generated_messages = self._generate_messages(5)
messages_callback_return_value = 'asdfg'
def messages_callback(
pulled_messages: List[ReceivedMessage],
context: Dict[str, Any],
):
assert pulled_messages == generated_messages
assert isinstance(context, dict)
for key in context.keys():
assert isinstance(key, str)
return messages_callback_return_value
messages_callback = mock.Mock(side_effect=messages_callback)
operator = PubSubPullOperator(
task_id=TASK_ID,
project_id=TEST_PROJECT,
subscription=TEST_SUBSCRIPTION,
messages_callback=messages_callback,
)
mock_hook.return_value.pull.return_value = generated_messages
response = operator.execute({})
mock_hook.return_value.pull.assert_called_once_with(
project_id=TEST_PROJECT, subscription=TEST_SUBSCRIPTION, max_messages=5, return_immediately=True
)
messages_callback.assert_called_once()
assert response == messages_callback_return_value
| {
"content_hash": "b226dc74d4733b401015444a6e9a5669",
"timestamp": "",
"source": "github",
"line_count": 293,
"max_line_length": 108,
"avg_line_length": 35.57337883959045,
"alnum_prop": 0.6215101218459177,
"repo_name": "apache/incubator-airflow",
"id": "a8bc046ea918666e4994ea347b636aa2429eda7f",
"size": "11211",
"binary": false,
"copies": "2",
"ref": "refs/heads/main",
"path": "tests/providers/google/cloud/operators/test_pubsub.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "69070"
},
{
"name": "Dockerfile",
"bytes": "2001"
},
{
"name": "HTML",
"bytes": "283783"
},
{
"name": "JavaScript",
"bytes": "1387552"
},
{
"name": "Mako",
"bytes": "1284"
},
{
"name": "Python",
"bytes": "5482822"
},
{
"name": "Shell",
"bytes": "40957"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import, division, print_function, unicode_literals
import warnings
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
from django.utils import six
from haystack.backends import BaseEngine, BaseSearchBackend, BaseSearchQuery, EmptyResults, log_query
from haystack.constants import DJANGO_CT, DJANGO_ID, ID
from haystack.exceptions import MissingDependency, MoreLikeThisError, SkipDocument
from haystack.inputs import Clean, Exact, PythonData, Raw
from haystack.models import SearchResult
from haystack.utils import log as logging
from haystack.utils import get_identifier, get_model_ct
from haystack.utils.app_loading import haystack_get_model
try:
from pysolr import Solr, SolrError
except ImportError:
raise MissingDependency("The 'solr' backend requires the installation of 'pysolr'. Please refer to the documentation.")
class SolrSearchBackend(BaseSearchBackend):
    # Words reserved by Solr for special use.
RESERVED_WORDS = (
'AND',
'NOT',
'OR',
'TO',
)
# Characters reserved by Solr for special use.
# The '\\' must come first, so as not to overwrite the other slash replacements.
RESERVED_CHARACTERS = (
'\\', '+', '-', '&&', '||', '!', '(', ')', '{', '}',
'[', ']', '^', '"', '~', '*', '?', ':', '/',
)
def __init__(self, connection_alias, **connection_options):
super(SolrSearchBackend, self).__init__(connection_alias, **connection_options)
        if 'URL' not in connection_options:
raise ImproperlyConfigured("You must specify a 'URL' in your settings for connection '%s'." % connection_alias)
self.conn = Solr(connection_options['URL'], timeout=self.timeout, **connection_options.get('KWARGS', {}))
self.log = logging.getLogger('haystack')
def update(self, index, iterable, commit=True):
docs = []
for obj in iterable:
try:
docs.append(index.full_prepare(obj))
except SkipDocument:
self.log.debug(u"Indexing for object `%s` skipped", obj)
except UnicodeDecodeError:
if not self.silently_fail:
raise
# We'll log the object identifier but won't include the actual object
# to avoid the possibility of that generating encoding errors while
# processing the log message:
self.log.error(u"UnicodeDecodeError while preparing object for update", exc_info=True, extra={
"data": {
"index": index,
"object": get_identifier(obj)
}
})
if len(docs) > 0:
try:
self.conn.add(docs, commit=commit, boost=index.get_field_weights())
except (IOError, SolrError) as e:
if not self.silently_fail:
raise
self.log.error("Failed to add documents to Solr: %s", e)
def remove(self, obj_or_string, commit=True):
solr_id = get_identifier(obj_or_string)
try:
kwargs = {
'commit': commit,
'id': solr_id
}
self.conn.delete(**kwargs)
except (IOError, SolrError) as e:
if not self.silently_fail:
raise
self.log.error("Failed to remove document '%s' from Solr: %s", solr_id, e)
def clear(self, models=[], commit=True):
try:
if not models:
# *:* matches all docs in Solr
self.conn.delete(q='*:*', commit=commit)
else:
models_to_delete = []
for model in models:
models_to_delete.append("%s:%s" % (DJANGO_CT, get_model_ct(model)))
self.conn.delete(q=" OR ".join(models_to_delete), commit=commit)
if commit:
# Run an optimize post-clear. http://wiki.apache.org/solr/FAQ#head-9aafb5d8dff5308e8ea4fcf4b71f19f029c4bb99
self.conn.optimize()
except (IOError, SolrError) as e:
if not self.silently_fail:
raise
if len(models):
self.log.error("Failed to clear Solr index of models '%s': %s", ','.join(models_to_delete), e)
else:
self.log.error("Failed to clear Solr index: %s", e)
@log_query
def search(self, query_string, **kwargs):
if len(query_string) == 0:
return {
'results': [],
'hits': 0,
}
search_kwargs = self.build_search_kwargs(query_string, **kwargs)
try:
raw_results = self.conn.search(query_string, **search_kwargs)
except (IOError, SolrError) as e:
if not self.silently_fail:
raise
self.log.error("Failed to query Solr using '%s': %s", query_string, e)
raw_results = EmptyResults()
return self._process_results(raw_results, highlight=kwargs.get('highlight'), result_class=kwargs.get('result_class', SearchResult), distance_point=kwargs.get('distance_point'))
def build_search_kwargs(self, query_string, sort_by=None, start_offset=0, end_offset=None,
fields='', highlight=False, facets=None,
date_facets=None, query_facets=None,
narrow_queries=None, spelling_query=None,
within=None, dwithin=None, distance_point=None,
models=None, limit_to_registered_models=None,
result_class=None, stats=None):
kwargs = {'fl': '* score'}
if fields:
if isinstance(fields, (list, set)):
fields = " ".join(fields)
kwargs['fl'] = fields
if sort_by is not None:
if sort_by in ['distance asc', 'distance desc'] and distance_point:
# Do the geo-enabled sort.
lng, lat = distance_point['point'].get_coords()
kwargs['sfield'] = distance_point['field']
kwargs['pt'] = '%s,%s' % (lat, lng)
if sort_by == 'distance asc':
kwargs['sort'] = 'geodist() asc'
else:
kwargs['sort'] = 'geodist() desc'
else:
if sort_by.startswith('distance '):
warnings.warn("In order to sort by distance, you must call the '.distance(...)' method.")
# Regular sorting.
kwargs['sort'] = sort_by
if start_offset is not None:
kwargs['start'] = start_offset
if end_offset is not None:
kwargs['rows'] = end_offset - start_offset
if highlight is True:
kwargs['hl'] = 'true'
kwargs['hl.fragsize'] = '200'
if self.include_spelling is True:
kwargs['spellcheck'] = 'true'
kwargs['spellcheck.collate'] = 'true'
kwargs['spellcheck.count'] = 1
if spelling_query:
kwargs['spellcheck.q'] = spelling_query
if facets is not None:
kwargs['facet'] = 'on'
kwargs['facet.field'] = facets.keys()
for facet_field, options in facets.items():
for key, value in options.items():
kwargs['f.%s.facet.%s' % (facet_field, key)] = self.conn._from_python(value)
if date_facets is not None:
kwargs['facet'] = 'on'
kwargs['facet.date'] = date_facets.keys()
kwargs['facet.date.other'] = 'none'
for key, value in date_facets.items():
kwargs["f.%s.facet.date.start" % key] = self.conn._from_python(value.get('start_date'))
kwargs["f.%s.facet.date.end" % key] = self.conn._from_python(value.get('end_date'))
gap_by_string = value.get('gap_by').upper()
gap_string = "%d%s" % (value.get('gap_amount'), gap_by_string)
if value.get('gap_amount') != 1:
gap_string += "S"
kwargs["f.%s.facet.date.gap" % key] = '+%s/%s' % (gap_string, gap_by_string)
if query_facets is not None:
kwargs['facet'] = 'on'
kwargs['facet.query'] = ["%s:%s" % (field, value) for field, value in query_facets]
if limit_to_registered_models is None:
limit_to_registered_models = getattr(settings, 'HAYSTACK_LIMIT_TO_REGISTERED_MODELS', True)
if models and len(models):
model_choices = sorted(get_model_ct(model) for model in models)
elif limit_to_registered_models:
# Using narrow queries, limit the results to only models handled
# with the current routers.
model_choices = self.build_models_list()
else:
model_choices = []
if len(model_choices) > 0:
if narrow_queries is None:
narrow_queries = set()
narrow_queries.add('%s:(%s)' % (DJANGO_CT, ' OR '.join(model_choices)))
if narrow_queries is not None:
kwargs['fq'] = list(narrow_queries)
if stats:
kwargs['stats'] = "true"
for k in stats.keys():
kwargs['stats.field'] = k
for facet in stats[k]:
kwargs['f.%s.stats.facet' % k] = facet
if within is not None:
from haystack.utils.geo import generate_bounding_box
kwargs.setdefault('fq', [])
((min_lat, min_lng), (max_lat, max_lng)) = generate_bounding_box(within['point_1'], within['point_2'])
# Bounding boxes are min, min TO max, max. Solr's wiki was *NOT*
# very clear on this.
bbox = '%s:[%s,%s TO %s,%s]' % (within['field'], min_lat, min_lng, max_lat, max_lng)
kwargs['fq'].append(bbox)
if dwithin is not None:
kwargs.setdefault('fq', [])
lng, lat = dwithin['point'].get_coords()
geofilt = '{!geofilt pt=%s,%s sfield=%s d=%s}' % (lat, lng, dwithin['field'], dwithin['distance'].km)
kwargs['fq'].append(geofilt)
# Check to see if the backend should try to include distances
# (Solr 4.X+) in the results.
if self.distance_available and distance_point:
# In early testing, you can't just hand Solr 4.X a proper bounding box
# & request distances. To enable native distance would take calculating
# a center point & a radius off the user-provided box, which kinda
# sucks. We'll avoid it for now, since Solr 4.x's release will be some
# time yet.
# kwargs['fl'] += ' _dist_:geodist()'
pass
return kwargs
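    # For reference, a plain keyword search with pagination and the default
    # model limiting produces kwargs shaped roughly like this (the model labels
    # are illustrative):
    #
    #   {'fl': '* score', 'start': 0, 'rows': 20,
    #    'fq': ['django_ct:(blog.post OR news.article)']}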
def more_like_this(self, model_instance, additional_query_string=None,
start_offset=0, end_offset=None, models=None,
limit_to_registered_models=None, result_class=None, **kwargs):
from haystack import connections
# Deferred models will have a different class ("RealClass_Deferred_fieldname")
# which won't be in our registry:
model_klass = model_instance._meta.concrete_model
index = connections[self.connection_alias].get_unified_index().get_index(model_klass)
field_name = index.get_content_field()
params = {
'fl': '*,score',
}
if start_offset is not None:
params['start'] = start_offset
if end_offset is not None:
params['rows'] = end_offset
narrow_queries = set()
if limit_to_registered_models is None:
limit_to_registered_models = getattr(settings, 'HAYSTACK_LIMIT_TO_REGISTERED_MODELS', True)
if models and len(models):
model_choices = sorted(get_model_ct(model) for model in models)
elif limit_to_registered_models:
# Using narrow queries, limit the results to only models handled
# with the current routers.
model_choices = self.build_models_list()
else:
model_choices = []
if len(model_choices) > 0:
if narrow_queries is None:
narrow_queries = set()
narrow_queries.add('%s:(%s)' % (DJANGO_CT, ' OR '.join(model_choices)))
if additional_query_string:
narrow_queries.add(additional_query_string)
if narrow_queries:
params['fq'] = list(narrow_queries)
query = "%s:%s" % (ID, get_identifier(model_instance))
try:
raw_results = self.conn.more_like_this(query, field_name, **params)
except (IOError, SolrError) as e:
if not self.silently_fail:
raise
self.log.error("Failed to fetch More Like This from Solr for document '%s': %s", query, e)
raw_results = EmptyResults()
return self._process_results(raw_results, result_class=result_class)
def _process_results(self, raw_results, highlight=False, result_class=None, distance_point=None):
from haystack import connections
results = []
hits = raw_results.hits
facets = {}
stats = {}
spelling_suggestion = None
if result_class is None:
result_class = SearchResult
        if hasattr(raw_results, 'stats'):
            stats = raw_results.stats.get('stats_fields', {})
if hasattr(raw_results, 'facets'):
facets = {
'fields': raw_results.facets.get('facet_fields', {}),
'dates': raw_results.facets.get('facet_dates', {}),
'queries': raw_results.facets.get('facet_queries', {}),
}
for key in ['fields']:
for facet_field in facets[key]:
# Convert to a two-tuple, as Solr's json format returns a list of
# pairs.
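                    # e.g. ['news.article', 12, 'blog.post', 4]
                    #   -> [('news.article', 12), ('blog.post', 4)]  (illustrative values)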
facets[key][facet_field] = list(zip(facets[key][facet_field][::2], facets[key][facet_field][1::2]))
if self.include_spelling is True:
if hasattr(raw_results, 'spellcheck'):
if len(raw_results.spellcheck.get('suggestions', [])):
# For some reason, it's an array of pairs. Pull off the
# collated result from the end.
spelling_suggestion = raw_results.spellcheck.get('suggestions')[-1]
unified_index = connections[self.connection_alias].get_unified_index()
indexed_models = unified_index.get_indexed_models()
for raw_result in raw_results.docs:
app_label, model_name = raw_result[DJANGO_CT].split('.')
additional_fields = {}
model = haystack_get_model(app_label, model_name)
if model and model in indexed_models:
index = unified_index.get_index(model)
index_field_map = index.field_map
for key, value in raw_result.items():
string_key = str(key)
# re-map key if alternate name used
if string_key in index_field_map:
string_key = index_field_map[key]
if string_key in index.fields and hasattr(index.fields[string_key], 'convert'):
additional_fields[string_key] = index.fields[string_key].convert(value)
else:
additional_fields[string_key] = self.conn._to_python(value)
del(additional_fields[DJANGO_CT])
del(additional_fields[DJANGO_ID])
del(additional_fields['score'])
if raw_result[ID] in getattr(raw_results, 'highlighting', {}):
additional_fields['highlighted'] = raw_results.highlighting[raw_result[ID]]
if distance_point:
additional_fields['_point_of_origin'] = distance_point
if raw_result.get('__dist__'):
from haystack.utils.geo import Distance
additional_fields['_distance'] = Distance(km=float(raw_result['__dist__']))
else:
additional_fields['_distance'] = None
result = result_class(app_label, model_name, raw_result[DJANGO_ID], raw_result['score'], **additional_fields)
results.append(result)
else:
hits -= 1
return {
'results': results,
'hits': hits,
'stats': stats,
'facets': facets,
'spelling_suggestion': spelling_suggestion,
}
def build_schema(self, fields):
content_field_name = ''
schema_fields = []
for field_name, field_class in fields.items():
field_data = {
'field_name': field_class.index_fieldname,
'type': 'text_en',
'indexed': 'true',
'stored': 'true',
'multi_valued': 'false',
}
if field_class.document is True:
content_field_name = field_class.index_fieldname
# DRL_FIXME: Perhaps move to something where, if none of these
# checks succeed, call a custom method on the form that
# returns, per-backend, the right type of storage?
if field_class.field_type in ['date', 'datetime']:
field_data['type'] = 'date'
elif field_class.field_type == 'integer':
field_data['type'] = 'long'
elif field_class.field_type == 'float':
field_data['type'] = 'float'
elif field_class.field_type == 'boolean':
field_data['type'] = 'boolean'
elif field_class.field_type == 'ngram':
field_data['type'] = 'ngram'
elif field_class.field_type == 'edge_ngram':
field_data['type'] = 'edge_ngram'
elif field_class.field_type == 'location':
field_data['type'] = 'location'
if field_class.is_multivalued:
field_data['multi_valued'] = 'true'
if field_class.stored is False:
field_data['stored'] = 'false'
# Do this last to override `text` fields.
if field_class.indexed is False:
field_data['indexed'] = 'false'
# If it's text and not being indexed, we probably don't want
# to do the normal lowercase/tokenize/stemming/etc. dance.
if field_data['type'] == 'text_en':
field_data['type'] = 'string'
# If it's a ``FacetField``, make sure we don't postprocess it.
if hasattr(field_class, 'facet_for'):
# If it's text, it ought to be a string.
if field_data['type'] == 'text_en':
field_data['type'] = 'string'
schema_fields.append(field_data)
return (content_field_name, schema_fields)
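    # Editor's illustrative sketch (not part of the original backend): for a
    # hypothetical ``DateTimeField`` whose ``index_fieldname`` is ``pub_date``
    # (indexed, stored, single-valued, non-document), the loop above would
    # emit a schema entry roughly like:
    #   {'field_name': 'pub_date', 'type': 'date', 'indexed': 'true',
    #    'stored': 'true', 'multi_valued': 'false'}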
def extract_file_contents(self, file_obj):
"""Extract text and metadata from a structured file (PDF, MS Word, etc.)
Uses the Solr ExtractingRequestHandler, which is based on Apache Tika.
See the Solr wiki for details:
http://wiki.apache.org/solr/ExtractingRequestHandler
Due to the way the ExtractingRequestHandler is implemented it completely
replaces the normal Haystack indexing process with several unfortunate
restrictions: only one file per request, the extracted data is added to
the index with no ability to modify it, etc. To simplify the process and
allow for more advanced use we'll run using the extract-only mode to
return the extracted data without adding it to the index so we can then
use it within Haystack's normal templating process.
Returns None if metadata cannot be extracted; otherwise returns a
dictionary containing at least two keys:
:contents:
Extracted full-text content, if applicable
:metadata:
key:value pairs of text strings
"""
try:
return self.conn.extract(file_obj)
except Exception as e:
self.log.warning(u"Unable to extract file contents: %s", e,
exc_info=True, extra={"data": {"file": file_obj}})
return None
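# Editor's illustrative sketch (not part of the original backend): a minimal,
# hedged example of calling ``extract_file_contents`` from application code.
# ``backend`` is assumed to be an already configured ``SolrSearchBackend``
# instance and the file path is hypothetical.
def _example_extract_document(backend, path="/tmp/report.pdf"):
    """Return the Tika-extracted full text of ``path``, or None on failure."""
    with open(path, "rb") as file_obj:
        extracted = backend.extract_file_contents(file_obj)
    if extracted is None:
        return None
    # Per the docstring above, ``contents`` holds the extracted text and
    # ``metadata`` holds key/value pairs of text strings.
    return extracted["contents"]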
class SolrSearchQuery(BaseSearchQuery):
def matching_all_fragment(self):
return '*:*'
def build_query_fragment(self, field, filter_type, value):
from haystack import connections
query_frag = ''
if not hasattr(value, 'input_type_name'):
# Handle when we've got a ``ValuesListQuerySet``...
if hasattr(value, 'values_list'):
value = list(value)
if isinstance(value, six.string_types):
# It's not an ``InputType``. Assume ``Clean``.
value = Clean(value)
else:
value = PythonData(value)
# Prepare the query using the InputType.
prepared_value = value.prepare(self)
if not isinstance(prepared_value, (set, list, tuple)):
# Then convert whatever we get back to what pysolr wants if needed.
prepared_value = self.backend.conn._from_python(prepared_value)
# 'content' is a special reserved word, much like 'pk' in
# Django's ORM layer. It indicates 'no special field'.
if field == 'content':
index_fieldname = ''
else:
index_fieldname = u'%s:' % connections[self._using].get_unified_index().get_index_fieldname(field)
filter_types = {
'contains': u'%s',
'startswith': u'%s*',
'exact': u'%s',
'gt': u'{%s TO *}',
'gte': u'[%s TO *]',
'lt': u'{* TO %s}',
'lte': u'[* TO %s]',
}
if value.post_process is False:
query_frag = prepared_value
else:
if filter_type in ['contains', 'startswith']:
if value.input_type_name == 'exact':
query_frag = prepared_value
else:
                    # Iterate over terms & incorporate the converted form of each into the query.
terms = []
for possible_value in prepared_value.split(' '):
terms.append(filter_types[filter_type] % self.backend.conn._from_python(possible_value))
if len(terms) == 1:
query_frag = terms[0]
else:
query_frag = u"(%s)" % " AND ".join(terms)
elif filter_type == 'in':
in_options = []
for possible_value in prepared_value:
in_options.append(u'"%s"' % self.backend.conn._from_python(possible_value))
query_frag = u"(%s)" % " OR ".join(in_options)
elif filter_type == 'range':
start = self.backend.conn._from_python(prepared_value[0])
end = self.backend.conn._from_python(prepared_value[1])
query_frag = u'["%s" TO "%s"]' % (start, end)
elif filter_type == 'exact':
if value.input_type_name == 'exact':
query_frag = prepared_value
else:
prepared_value = Exact(prepared_value).prepare(self)
query_frag = filter_types[filter_type] % prepared_value
else:
if value.input_type_name != 'exact':
prepared_value = Exact(prepared_value).prepare(self)
query_frag = filter_types[filter_type] % prepared_value
if len(query_frag) and not isinstance(value, Raw):
if not query_frag.startswith('(') and not query_frag.endswith(')'):
query_frag = "(%s)" % query_frag
return u"%s%s" % (index_fieldname, query_frag)
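    # Worked examples (editor's illustration; the field names and values are
    # hypothetical): given the ``filter_types`` table above,
    # ``build_query_fragment`` produces roughly
    #   ('title', 'startswith', 'news')        -> u'title:(news*)'
    #   ('pub_date', 'gte', '2015-01-01T00:00:00Z')
    #                                          -> u'pub_date:([2015-01-01T00:00:00Z TO *])'
    #   ('author', 'in', ['alice', 'bob'])     -> u'author:("alice" OR "bob")'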
def build_alt_parser_query(self, parser_name, query_string='', **kwargs):
if query_string:
query_string = Clean(query_string).prepare(self)
kwarg_bits = []
for key in sorted(kwargs.keys()):
if isinstance(kwargs[key], six.string_types) and ' ' in kwargs[key]:
kwarg_bits.append(u"%s='%s'" % (key, kwargs[key]))
else:
kwarg_bits.append(u"%s=%s" % (key, kwargs[key]))
return u'_query_:"{!%s %s}%s"' % (parser_name, Clean(' '.join(kwarg_bits)), query_string)
def build_params(self, spelling_query=None, **kwargs):
search_kwargs = {
'start_offset': self.start_offset,
'result_class': self.result_class
}
order_by_list = None
if self.order_by:
if order_by_list is None:
order_by_list = []
for order_by in self.order_by:
if order_by.startswith('-'):
order_by_list.append('%s desc' % order_by[1:])
else:
order_by_list.append('%s asc' % order_by)
search_kwargs['sort_by'] = ", ".join(order_by_list)
if self.date_facets:
search_kwargs['date_facets'] = self.date_facets
if self.distance_point:
search_kwargs['distance_point'] = self.distance_point
if self.dwithin:
search_kwargs['dwithin'] = self.dwithin
if self.end_offset is not None:
search_kwargs['end_offset'] = self.end_offset
if self.facets:
search_kwargs['facets'] = self.facets
if self.fields:
search_kwargs['fields'] = self.fields
if self.highlight:
search_kwargs['highlight'] = self.highlight
if self.models:
search_kwargs['models'] = self.models
if self.narrow_queries:
search_kwargs['narrow_queries'] = self.narrow_queries
if self.query_facets:
search_kwargs['query_facets'] = self.query_facets
if self.within:
search_kwargs['within'] = self.within
if spelling_query:
search_kwargs['spelling_query'] = spelling_query
if self.stats:
search_kwargs['stats'] = self.stats
return search_kwargs
def run(self, spelling_query=None, **kwargs):
"""Builds and executes the query. Returns a list of search results."""
final_query = self.build_query()
search_kwargs = self.build_params(spelling_query, **kwargs)
if kwargs:
search_kwargs.update(kwargs)
results = self.backend.search(final_query, **search_kwargs)
self._results = results.get('results', [])
self._hit_count = results.get('hits', 0)
self._facet_counts = self.post_process_facets(results)
        self._stats = results.get('stats', {})
self._spelling_suggestion = results.get('spelling_suggestion', None)
def run_mlt(self, **kwargs):
"""Builds and executes the query. Returns a list of search results."""
if self._more_like_this is False or self._mlt_instance is None:
raise MoreLikeThisError("No instance was provided to determine 'More Like This' results.")
additional_query_string = self.build_query()
search_kwargs = {
'start_offset': self.start_offset,
'result_class': self.result_class,
'models': self.models
}
if self.end_offset is not None:
search_kwargs['end_offset'] = self.end_offset - self.start_offset
results = self.backend.more_like_this(self._mlt_instance, additional_query_string, **search_kwargs)
self._results = results.get('results', [])
self._hit_count = results.get('hits', 0)
class SolrEngine(BaseEngine):
backend = SolrSearchBackend
query = SolrSearchQuery
| {
"content_hash": "27f20eb6073e1be583aef18cfd4af431",
"timestamp": "",
"source": "github",
"line_count": 716,
"max_line_length": 184,
"avg_line_length": 39.30307262569833,
"alnum_prop": 0.5475640524501617,
"repo_name": "vitalyvolkov/django-haystack",
"id": "4f301c925d62ef6becbacd45dca0cf76e3965360",
"size": "28160",
"binary": false,
"copies": "7",
"ref": "refs/heads/master",
"path": "haystack/backends/solr_backend.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "HTML",
"bytes": "1431"
},
{
"name": "Python",
"bytes": "752773"
},
{
"name": "Shell",
"bytes": "1809"
}
],
"symlink_target": ""
} |
"""
Django settings for training project.
Generated by 'django-admin startproject' using Django 1.11.6.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.11/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.11/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '=*p9ko#afwa@qmr%icu!v04=_jwyhb=na9-r0ji&3hh!nn)qwc'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'ticketing_system.apps.TicketingSystemConfig',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'training.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'training.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.11/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.mysql',
'NAME': 'ticketing_system',
'HOST': 'localhost',
'USER': 'debian',
'PASSWORD': 'debian123',
}
}
# Password validation
# https://docs.djangoproject.com/en/1.11/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.11/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.11/howto/static-files/
STATIC_URL = '/static/'
| {
"content_hash": "ff8e14c5ad801e6c78f5a3e4934b177d",
"timestamp": "",
"source": "github",
"line_count": 124,
"max_line_length": 91,
"avg_line_length": 25.975806451612904,
"alnum_prop": 0.6827072337783298,
"repo_name": "aberon10/training",
"id": "abc1832ba96d48aaac95aaca499f7d4a31801fb2",
"size": "3221",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "training/training/settings.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "9156"
},
{
"name": "HTML",
"bytes": "8567"
},
{
"name": "JavaScript",
"bytes": "1281"
},
{
"name": "Python",
"bytes": "20154"
}
],
"symlink_target": ""
} |
from google.appengine.ext import ndb
from google.appengine.api import datastore_errors
import logging
import re
def stringValidator(prop, value):
cleanValue = value.strip()
if prop._name == 'email' or prop._name == 'requester_email':
cleanValue = cleanValue.lower()
return cleanValue
def phoneValidator(prop, value):
    pn = re.sub(r'[^\d]', '', value)
if len(pn) == 10:
return pn
elif len(pn) == 11 and pn[0] == '1':
return pn[1:] # remove +1 US country code
else:
logging.info(value)
raise datastore_errors.BadValueError(prop._name)
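# Editor's note (illustrative, sample numbers are made up): phoneValidator
# normalizes to a bare 10-digit US number, e.g.
#   '(401) 555-0123'   -> '4015550123'
#   '+1 401-555-0123'  -> '4015550123'   (leading US country code stripped)
#   '555-0123'         -> raises BadValueError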
class Visitor(ndb.Model):
name = ndb.StringProperty(default=None, validator=stringValidator)
email = ndb.StringProperty(default=None, validator=stringValidator)
checked_in = ndb.BooleanProperty(default=False)
org = ndb.StringProperty(default=None, validator=stringValidator)
def asDict(self, include_keys):
return {key: getattr(self, key, None) for key in include_keys}
class Rep(ndb.Model):
name = ndb.StringProperty(default=None, validator=stringValidator)
email = ndb.StringProperty(default=None, validator=stringValidator)
phone_number = ndb.StringProperty(default=None, validator=phoneValidator)
checked_in = ndb.BooleanProperty(default=False)
company = ndb.StringProperty(default=None, validator=stringValidator)
shirt_gen = ndb.StringProperty(choices=['M', 'W'])
shirt_size = ndb.StringProperty(choices=['XS', 'S', 'M', 'L', 'XL', 'XXL'])
def asDict(self, include_keys):
me = {key: getattr(self, key, None) for key in include_keys}
if 'status' in include_keys:
me['status'] = 'confirmed'
return me
class Volunteer(ndb.Model):
name = ndb.StringProperty(default=None, validator=stringValidator)
email = ndb.StringProperty(default=None, validator=stringValidator)
checked_in = ndb.BooleanProperty(default=False)
phone_number = ndb.StringProperty(default=None, validator=phoneValidator)
role = ndb.StringProperty(default=None, validator=stringValidator)
shirt_gen = ndb.StringProperty(choices=['M', 'W'])
shirt_size = ndb.StringProperty(choices=['XS', 'S', 'M', 'L', 'XL', 'XXL'])
def asDict(self, include_keys):
me = {key: getattr(self, key, None) for key in include_keys}
if 'status' in include_keys:
me['status'] = 'confirmed'
return me
class CheckInSession(ndb.Model):
user = ndb.StringProperty(default=None)
active = ndb.BooleanProperty(default=True)
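# Editor's illustrative sketch (not part of the original module): how the
# validators behave when a model is constructed. The sample values are
# hypothetical and ``put()`` requires an App Engine datastore context.
#   v = Visitor(name='  Ada Lovelace ', email='  ADA@Example.COM ')
#   v.email  # -> 'ada@example.com' (stripped and lower-cased by stringValidator)
#   v.name   # -> 'Ada Lovelace'    (stripped only)
#   v.put()  # persists the entity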
| {
"content_hash": "f0f5519b91815f8291e33b491cf0d4d9",
"timestamp": "",
"source": "github",
"line_count": 67,
"max_line_length": 79,
"avg_line_length": 37.83582089552239,
"alnum_prop": 0.6848126232741617,
"repo_name": "hackatbrown/2015.hackatbrown.org",
"id": "c13e58f6f73f23d3fdd31aaab9c56fcf0fce807d",
"size": "2535",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "hack-at-brown-2015/models.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "2826195"
},
{
"name": "HTML",
"bytes": "853190"
},
{
"name": "JavaScript",
"bytes": "3333401"
},
{
"name": "Python",
"bytes": "3830632"
}
],
"symlink_target": ""
} |
"""
Provides classes for the analysis of dynamical systems and time series based
on recurrence plots, including measures of recurrence quantification
analysis (RQA) and recurrence network analysis.
"""
# array object and fast numerics
import numpy as np
from .. import Network
from .joint_recurrence_plot import JointRecurrencePlot
#
# Class definitions
#
class JointRecurrenceNetwork(JointRecurrencePlot, Network):
"""
Class JointRecurrenceNetwork for generating and quantitatively analyzing
joint recurrence networks.
For a joint recurrence network, time series x and y need to have the same
length! Formally, nodes are identified with sampling points in time, while
an undirected link (i,j) is introduced if x at time i is recurrent to x at
time j and also y at time i is recurrent to y at time j. Self-loops are
excluded in this undirected network representation.
More information on the theory and applications of joint recurrence
networks can be found in [Feldhoff2013]_.
**Examples:**
- Create an instance of JointRecurrenceNetwork with a fixed recurrence
threshold and without embedding::
JointRecurrenceNetwork(x, y, threshold=(0.1,0.2))
- Create an instance of JointRecurrenceNetwork with a fixed recurrence
threshold in units of STD and without embedding::
JointRecurrenceNetwork(x, y, threshold_std=(0.03,0.05))
- Create an instance of JointRecurrenceNetwork at a fixed recurrence rate
and using time delay embedding::
JointRecurrenceNetwork(
x, y, dim=(3,5), tau=(2,1),
recurrence_rate=(0.05,0.04)).recurrence_rate()
"""
#
# Internal methods
#
def __init__(self, x, y, metric=("supremum", "supremum"),
normalize=False, lag=0, silence_level=0, **kwds):
"""
Initialize an instance of JointRecurrenceNetwork.
.. note::
For a joint recurrence network, time series x and y need to have the
same length!
Creates an embedding of the given time series x and y, calculates a
joint recurrence plot from the embedding and then creates a Network
object from the joint recurrence plot, interpreting the joint
recurrence matrix as the adjacency matrix of an undirected complex
network.
Either recurrence thresholds ``threshold``/``threshold_std`` or
recurrence rates ``recurrence_rate`` have to be given as keyword
arguments.
Embedding is only supported for scalar time series. If embedding
dimension ``dim`` and delay ``tau`` are **both** given as keyword
arguments, embedding is applied. Multidimensional time series are
processed as is by default.
:type x: 2D Numpy array (time, dimension)
:arg x: The time series x to be analyzed, can be scalar or
multi-dimensional.
:type y: 2D Numpy array (time, dimension)
:arg y: The time series y to be analyzed, can be scalar or
multi-dimensional.
:type metric: tuple of string
:arg metric: The metric for measuring distances in phase space
("manhattan", "euclidean", "supremum"). Give separately for each
time series.
:type normalize: tuple of bool
:arg normalize: Decide whether to normalize the time series to zero
mean and unit standard deviation. Give separately for each time
series.
:arg number lag: To create a delayed version of the JRP.
:arg number silence_level: Inverse level of verbosity of the object.
:type threshold: tuple of number
:keyword threshold: The recurrence threshold keyword for generating the
recurrence plot using a fixed threshold. Give separately for each
time series.
:type threshold_std: tuple of number
:keyword threshold_std: The recurrence threshold keyword for generating
the recurrence plot using a fixed threshold in units of the time
series' STD. Give separately for each time series.
:type recurrence_rate: tuple of number
:keyword recurrence_rate: The recurrence rate keyword for generating
the recurrence plot using a fixed recurrence rate. Give separately
for each time series.
:type dim: tuple of number
:keyword dim: The embedding dimension. Give separately for each time
series.
:type tau: tuple of number
:keyword tau: The embedding delay. Give separately for each time
series.
"""
# Check for consistency
if np.abs(lag) < x.shape[0]:
if x.shape[0] == y.shape[0]:
# Initialize the underlying RecurrencePlot object
JointRecurrencePlot.__init__(self, x, y, metric, normalize,
lag, silence_level, **kwds)
# Set diagonal of JR to zero to avoid self-loops in the joint
# recurrence network
A = self.JR - np.eye((self.N-np.abs(lag)), dtype="int8")
# Create a Network object interpreting the recurrence matrix
# as the graph adjacency matrix. Joint recurrence networks
# are undirected by definition.
Network.__init__(self, A, directed=False,
silence_level=silence_level)
else:
raise ValueError(
"Both time series x and y need to have the same length!")
else:
raise ValueError(
"Delay value (lag) must not exceed length of time series!")
def __str__(self):
"""
Returns a string representation.
"""
return 'JointRecurrenceNetwork:\n%s\n%s' % (
JointRecurrencePlot.__str__(self), Network.__str__(self))
def clear_cache(self):
"""
Clean up memory by deleting information that can be recalculated from
basic data.
Extends the clean up methods of the parent classes.
"""
        # Call clean up of JointRecurrencePlot
JointRecurrencePlot.clear_cache(self)
# Call clean up of Network
Network.clear_cache(self)
#
# Methods to handle recurrence networks
#
def set_fixed_threshold(self, threshold):
"""
Create a joint recurrence network at fixed thresholds.
:type threshold: tuple of number
:arg threshold: The threshold. Give for each time series separately.
"""
# Set fixed threshold on recurrence plot level
JointRecurrencePlot.set_fixed_threshold(self, threshold)
# Set diagonal of JR to zero to avoid self-loops in the joint
# recurrence network
A = self.JR.copy()
A.flat[::self.N+1] = 0
# Create a Network object interpreting the recurrence matrix as the
# graph adjacency matrix. Joint recurrence networks are undirected by
# definition.
Network.__init__(self, A, directed=False,
silence_level=self.silence_level)
def set_fixed_threshold_std(self, threshold_std):
"""
Create a joint recurrence network at fixed thresholds in units of the
standard deviation of the time series.
:type threshold_std: tuple of number
:arg threshold_std: The threshold in units of standard deviation. Give
for each time series separately.
"""
# Set fixed threshold on recurrence plot level
JointRecurrencePlot.set_fixed_threshold_std(self, threshold_std)
# Set diagonal of JR to zero to avoid self-loops in the joint
# recurrence network
A = self.JR.copy()
A.flat[::self.N+1] = 0
# Create a Network object interpreting the recurrence matrix as the
# graph adjacency matrix. Joint recurrence networks are undirected by
# definition.
Network.__init__(self, A, directed=False,
silence_level=self.silence_level)
def set_fixed_recurrence_rate(self, density):
"""
Create a joint recurrence network at fixed link densities (recurrence
rates).
:type density: tuple of number
:arg density: The link density / recurrence rate. Give for each time
series separately.
"""
# Set fixed recurrence rate on recurrence plot level
JointRecurrencePlot.set_fixed_recurrence_rate(self, density)
# Set diagonal of JR to zero to avoid self-loops in the joint
# recurrence network
A = self.JR.copy()
A.flat[::self.N+1] = 0
# Create a Network object interpreting the recurrence matrix as the
# graph adjacency matrix. Joint recurrence networks are undirected by
# definition.
Network.__init__(self, A, directed=False,
silence_level=self.silence_level)
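# Editor's illustrative sketch (not part of the original module): a minimal
# usage example mirroring the constructor calls in the class docstring. The
# test series, embedding parameters and recurrence rates below are arbitrary
# assumptions chosen only for demonstration.
def _example_joint_recurrence_network(n_samples=200):
    """Build a small joint recurrence network from two noisy test series
    and return the joint recurrence rate of the underlying plot."""
    t = np.linspace(0, 4 * np.pi, n_samples)
    x = np.sin(t) + 0.1 * np.random.randn(n_samples)
    y = np.cos(t) + 0.1 * np.random.randn(n_samples)
    net = JointRecurrenceNetwork(x, y, dim=(3, 3), tau=(2, 2),
                                 recurrence_rate=(0.05, 0.05))
    return net.recurrence_rate()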
| {
"content_hash": "a3e94862173eadc27250d3074581ceab",
"timestamp": "",
"source": "github",
"line_count": 226,
"max_line_length": 79,
"avg_line_length": 40.084070796460175,
"alnum_prop": 0.6330720830113699,
"repo_name": "wbarfuss/pyunicorn",
"id": "0c55fa038c50091b42b80c1c048af5dfa8b72234",
"size": "9291",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "pyunicorn/timeseries/joint_recurrence_network.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "1067604"
}
],
"symlink_target": ""
} |
from django.conf import settings
from django.conf.urls import url, include
from django.conf.urls.i18n import i18n_patterns
from django.views.generic import TemplateView, RedirectView
from django.utils.module_loading import import_string
import os.path
import zerver.forms
from zproject import dev_urls
from zproject.legacy_urls import legacy_urls
from zerver.views.integrations import IntegrationView, APIView, HelpView
from zerver.lib.integrations import WEBHOOK_INTEGRATIONS
from zerver.webhooks import github_dispatcher
from django.contrib.auth.views import (login, password_reset,
password_reset_done, password_reset_confirm, password_reset_complete)
import zerver.tornado.views
import zerver.views
import zerver.views.auth
import zerver.views.compatibility
import zerver.views.home
import zerver.views.registration
import zerver.views.zephyr
import zerver.views.users
import zerver.views.unsubscribe
import zerver.views.integrations
import zerver.views.user_settings
import confirmation.views
from zerver.lib.rest import rest_dispatch
# NB: There are several other pieces of code which route requests by URL:
#
# - legacy_urls.py contains API endpoints written before the redesign
# and should not be added to.
#
# - runtornado.py has its own URL list for Tornado views. See the
# invocation of web.Application in that file.
#
# - The Nginx config knows which URLs to route to Django or Tornado.
#
# - Likewise for the local dev server in tools/run-dev.py.
# These views serve pages (HTML). As such, their internationalization
# must depend on the url.
#
# If you're adding a new page to the website (as opposed to a new
# endpoint for use by code), you should add it here.
i18n_urls = [
url(r'^$', zerver.views.home.home, name='zerver.views.home.home'),
# We have a desktop-specific landing page in case we change our /
# to not log in in the future. We don't want to require a new
# desktop app build for everyone in that case
url(r'^desktop_home/$', zerver.views.home.desktop_home, name='zerver.views.home.desktop_home'),
url(r'^accounts/login/sso/$', zerver.views.auth.remote_user_sso, name='login-sso'),
url(r'^accounts/login/jwt/$', zerver.views.auth.remote_user_jwt, name='login-jwt'),
url(r'^accounts/login/social/(\w+)$', zerver.views.auth.start_social_login, name='login-social'),
url(r'^accounts/login/google/$', zerver.views.auth.start_google_oauth2, name='zerver.views.auth.start_google_oauth2'),
url(r'^accounts/login/google/send/$',
zerver.views.auth.send_oauth_request_to_google,
name='zerver.views.auth.send_oauth_request_to_google'),
url(r'^accounts/login/google/done/$', zerver.views.auth.finish_google_oauth2, name='zerver.views.auth.finish_google_oauth2'),
url(r'^accounts/login/subdomain/$', zerver.views.auth.log_into_subdomain, name='zerver.views.auth.log_into_subdomain'),
url(r'^accounts/login/local/$', zerver.views.auth.dev_direct_login, name='zerver.views.auth.dev_direct_login'),
# We have two entries for accounts/login to allow reverses on the Django
# view we're wrapping to continue to function.
url(r'^accounts/login/', zerver.views.auth.login_page, {'template_name': 'zerver/login.html'}, name='zerver.views.auth.login_page'),
url(r'^accounts/login/', login, {'template_name': 'zerver/login.html'},
name='django.contrib.auth.views.login'),
url(r'^accounts/logout/', zerver.views.auth.logout_then_login, name='zerver.views.auth.logout_then_login'),
url(r'^accounts/webathena_kerberos_login/',
zerver.views.zephyr.webathena_kerberos_login,
name='zerver.views.zephyr.webathena_kerberos_login'),
url(r'^accounts/password/reset/$', password_reset,
{'post_reset_redirect': '/accounts/password/reset/done/',
'template_name': 'zerver/reset.html',
'email_template_name': 'registration/password_reset_email.txt',
'password_reset_form': zerver.forms.ZulipPasswordResetForm,
}, name='django.contrib.auth.views.password_reset'),
url(r'^accounts/password/reset/done/$', password_reset_done,
{'template_name': 'zerver/reset_emailed.html'}),
url(r'^accounts/password/reset/(?P<uidb64>[0-9A-Za-z]+)/(?P<token>.+)/$',
password_reset_confirm,
{'post_reset_redirect': '/accounts/password/done/',
'template_name': 'zerver/reset_confirm.html',
'set_password_form': zerver.forms.LoggingSetPasswordForm},
name='django.contrib.auth.views.password_reset_confirm'),
url(r'^accounts/password/done/$', password_reset_complete,
{'template_name': 'zerver/reset_done.html'}),
# Avatar
url(r'^avatar/(?P<email_or_id>[\S]+)?/(?P<medium>[\S]+)?', zerver.views.users.avatar, name='zerver.views.users.avatar'),
url(r'^avatar/(?P<email_or_id>[\S]+)?', zerver.views.users.avatar, name='zerver.views.users.avatar'),
# Registration views, require a confirmation ID.
url(r'^accounts/home/', zerver.views.registration.accounts_home,
name='zerver.views.registration.accounts_home'),
url(r'^accounts/send_confirm/(?P<email>[\S]+)?',
TemplateView.as_view(template_name='zerver/accounts_send_confirm.html'), name='send_confirm'),
url(r'^accounts/register/', zerver.views.registration.accounts_register,
name='zerver.views.registration.accounts_register'),
url(r'^accounts/do_confirm/(?P<confirmation_key>[\w]+)', confirmation.views.confirm, name='confirmation.views.confirm'),
url(r'^accounts/confirm_new_email/(?P<confirmation_key>[\w]+)',
zerver.views.user_settings.confirm_email_change,
name='zerver.views.user_settings.confirm_email_change'),
# Email unsubscription endpoint. Allows for unsubscribing from various types of emails,
# including the welcome emails (day 1 & 2), missed PMs, etc.
url(r'^accounts/unsubscribe/(?P<type>[\w]+)/(?P<token>[\w]+)',
zerver.views.unsubscribe.email_unsubscribe, name='zerver.views.unsubscribe.email_unsubscribe'),
# Portico-styled page used to provide email confirmation of terms acceptance.
url(r'^accounts/accept_terms/$', zerver.views.home.accounts_accept_terms, name='zerver.views.home.accounts_accept_terms'),
# Realm Creation
url(r'^create_realm/$', zerver.views.registration.create_realm, name='zerver.views.create_realm'),
url(r'^create_realm/(?P<creation_key>[\w]+)$', zerver.views.registration.create_realm, name='zerver.views.create_realm'),
# Login/registration
url(r'^register/$', zerver.views.registration.accounts_home, name='register'),
url(r'^login/$', zerver.views.auth.login_page, {'template_name': 'zerver/login.html'}, name='zerver.views.auth.login_page'),
# A registration page that passes through the domain, for totally open realms.
url(r'^register/(?P<realm_str>\S+)/$', zerver.views.registration.accounts_home_with_realm_str,
name='zerver.views.registration.accounts_home_with_realm_str'),
# API and integrations documentation
url(r'^api/$', APIView.as_view(template_name='zerver/api.html')),
url(r'^api/endpoints/$', zerver.views.integrations.api_endpoint_docs, name='zerver.views.integrations.api_endpoint_docs'),
url(r'^integrations/$', IntegrationView.as_view()),
url(r'^about/$', TemplateView.as_view(template_name='zerver/about.html')),
url(r'^apps/$', TemplateView.as_view(template_name='zerver/apps.html')),
url(r'^robots\.txt$', RedirectView.as_view(url='/static/robots.txt', permanent=True)),
# Landing page, features pages, signup form, etc.
url(r'^hello/$', TemplateView.as_view(template_name='zerver/hello.html'), name='landing-page'),
url(r'^new-user/$', RedirectView.as_view(url='/hello', permanent=True)),
url(r'^features/$', TemplateView.as_view(template_name='zerver/features.html')),
url(r'^find_my_team/$', zerver.views.registration.find_my_team, name='zerver.views.registration.find_my_team'),
url(r'^authors/$', zerver.views.users.authors_view, name='zerver.views.users.authors_view')
]
# If a Terms of Service is supplied, add that route
if settings.TERMS_OF_SERVICE is not None:
i18n_urls += [url(r'^terms/$', TemplateView.as_view(template_name='zerver/terms.html'))]
# Make a copy of i18n_urls so that they appear without prefix for english
urls = list(i18n_urls)
# These endpoints constitute the redesigned API (V1), which uses:
# * REST verbs
# * Basic auth (username:password is email:apiKey)
# * Take and return json-formatted data
#
# If you're adding a new endpoint to the code that requires authentication,
# please add it here.
# See rest_dispatch in zerver.lib.rest for an explanation of auth methods used
#
# All of these paths are accessed by either a /json or /api/v1 prefix
v1_api_and_json_patterns = [
# realm-level calls
url(r'^realm$', rest_dispatch,
{'PATCH': 'zerver.views.realm.update_realm'}),
# Returns a 204, used by desktop app to verify connectivity status
url(r'generate_204$', zerver.views.registration.generate_204, name='zerver.views.registration.generate_204'),
# realm/domains -> zerver.views.realm_aliases
url(r'^realm/domains$', rest_dispatch,
{'GET': 'zerver.views.realm_aliases.list_aliases',
'POST': 'zerver.views.realm_aliases.create_alias'}),
url(r'^realm/domains/(?P<domain>\S+)$', rest_dispatch,
{'PATCH': 'zerver.views.realm_aliases.patch_alias',
'DELETE': 'zerver.views.realm_aliases.delete_alias'}),
# realm/emoji -> zerver.views.realm_emoji
url(r'^realm/emoji$', rest_dispatch,
{'GET': 'zerver.views.realm_emoji.list_emoji'}),
url(r'^realm/emoji/(?P<emoji_name>.*)$', rest_dispatch,
{'PUT': 'zerver.views.realm_emoji.upload_emoji',
'DELETE': 'zerver.views.realm_emoji.delete_emoji'}),
# realm/icon -> zerver.views.realm_icon
url(r'^realm/icon$', rest_dispatch,
{'PUT': 'zerver.views.realm_icon.upload_icon',
'DELETE': 'zerver.views.realm_icon.delete_icon_backend',
'GET': 'zerver.views.realm_icon.get_icon_backend'}),
# realm/filters -> zerver.views.realm_filters
url(r'^realm/filters$', rest_dispatch,
{'GET': 'zerver.views.realm_filters.list_filters',
'POST': 'zerver.views.realm_filters.create_filter'}),
url(r'^realm/filters/(?P<filter_id>\d+)$', rest_dispatch,
{'DELETE': 'zerver.views.realm_filters.delete_filter'}),
# users -> zerver.views.users
#
# Since some of these endpoints do something different if used on
# yourself with `/me` as the email, we need to make sure that we
# don't accidentally trigger these. The cleanest way to do that
# is to add a regular expression assertion that it isn't `/me/`
# (or ends with `/me`, in the case of hitting the root URL).
url(r'^users$', rest_dispatch,
{'GET': 'zerver.views.users.get_members_backend',
'POST': 'zerver.views.users.create_user_backend'}),
url(r'^users/(?!me/)(?P<email>[^/]*)/reactivate$', rest_dispatch,
{'POST': 'zerver.views.users.reactivate_user_backend'}),
url(r'^users/(?!me/)(?P<email>[^/]*)/presence$', rest_dispatch,
{'GET': 'zerver.views.presence.get_presence_backend'}),
url(r'^users/(?!me$)(?P<email>[^/]*)$', rest_dispatch,
{'PATCH': 'zerver.views.users.update_user_backend',
'DELETE': 'zerver.views.users.deactivate_user_backend'}),
url(r'^bots$', rest_dispatch,
{'GET': 'zerver.views.users.get_bots_backend',
'POST': 'zerver.views.users.add_bot_backend'}),
url(r'^bots/(?!me/)(?P<email>[^/]*)/api_key/regenerate$', rest_dispatch,
{'POST': 'zerver.views.users.regenerate_bot_api_key'}),
url(r'^bots/(?!me/)(?P<email>[^/]*)$', rest_dispatch,
{'PATCH': 'zerver.views.users.patch_bot_backend',
'DELETE': 'zerver.views.users.deactivate_bot_backend'}),
# messages -> zerver.views.messages
# GET returns messages, possibly filtered, POST sends a message
url(r'^messages$', rest_dispatch,
{'GET': 'zerver.views.messages.get_old_messages_backend',
'POST': 'zerver.views.messages.send_message_backend'}),
url(r'^messages/(?P<message_id>[0-9]+)$', rest_dispatch,
{'GET': 'zerver.views.messages.json_fetch_raw_message',
'PATCH': 'zerver.views.messages.update_message_backend'}),
url(r'^messages/render$', rest_dispatch,
{'POST': 'zerver.views.messages.render_message_backend'}),
url(r'^messages/flags$', rest_dispatch,
{'POST': 'zerver.views.messages.update_message_flags'}),
url(r'^messages/(?P<message_id>\d+)/history$', rest_dispatch,
{'GET': 'zerver.views.messages.get_message_edit_history'}),
# reactions -> zerver.view.reactions
# PUT adds a reaction to a message
# DELETE removes a reaction from a message
url(r'^messages/(?P<message_id>[0-9]+)/emoji_reactions/(?P<emoji_name>.*)$',
rest_dispatch,
{'PUT': 'zerver.views.reactions.add_reaction_backend',
'DELETE': 'zerver.views.reactions.remove_reaction_backend'}),
# attachments -> zerver.views.attachments
url(r'^attachments$', rest_dispatch,
{'GET': 'zerver.views.attachments.list_by_user'}),
url(r'^attachments/(?P<attachment_id>[0-9]+)$', rest_dispatch,
{'DELETE': 'zerver.views.attachments.remove'}),
# typing -> zerver.views.typing
# POST sends a typing notification event to recipients
url(r'^typing$', rest_dispatch,
{'POST': 'zerver.views.typing.send_notification_backend'}),
# user_uploads -> zerver.views.upload
url(r'^user_uploads$', rest_dispatch,
{'POST': 'zerver.views.upload.upload_file_backend'}),
# invite -> zerver.views.invite
url(r'^invite/bulk$', rest_dispatch,
{'POST': 'zerver.views.invite.bulk_invite_users'}),
# users/me -> zerver.views
url(r'^users/me$', rest_dispatch,
{'GET': 'zerver.views.users.get_profile_backend',
'DELETE': 'zerver.views.users.deactivate_user_own_backend'}),
    # PUT is currently used by mobile apps; we intend to remove the PUT version
# as soon as possible. POST exists to correct the erroneous use of PUT.
url(r'^users/me/pointer$', rest_dispatch,
{'GET': 'zerver.views.pointer.get_pointer_backend',
'PUT': 'zerver.views.pointer.update_pointer_backend',
'POST': 'zerver.views.pointer.update_pointer_backend'}),
url(r'^users/me/presence$', rest_dispatch,
{'POST': 'zerver.views.presence.update_active_status_backend'}),
# Endpoint used by mobile devices to register their push
# notification credentials
url(r'^users/me/apns_device_token$', rest_dispatch,
{'POST': 'zerver.views.push_notifications.add_apns_device_token',
'DELETE': 'zerver.views.push_notifications.remove_apns_device_token'}),
url(r'^users/me/android_gcm_reg_id$', rest_dispatch,
{'POST': 'zerver.views.push_notifications.add_android_reg_id',
'DELETE': 'zerver.views.push_notifications.remove_android_reg_id'}),
# users/me -> zerver.views.user_settings
url(r'^users/me/api_key/regenerate$', rest_dispatch,
{'POST': 'zerver.views.user_settings.regenerate_api_key'}),
url(r'^users/me/enter-sends$', rest_dispatch,
{'POST': 'zerver.views.user_settings.change_enter_sends'}),
url(r'^users/me/avatar$', rest_dispatch,
{'PUT': 'zerver.views.user_settings.set_avatar_backend',
'DELETE': 'zerver.views.user_settings.delete_avatar_backend'}),
# settings -> zerver.views.user_settings
url(r'^settings/display$', rest_dispatch,
{'PATCH': 'zerver.views.user_settings.update_display_settings_backend'}),
url(r'^settings/notifications$', rest_dispatch,
{'PATCH': 'zerver.views.user_settings.json_change_notify_settings'}),
url(r'^settings/ui$', rest_dispatch,
{'PATCH': 'zerver.views.user_settings.json_change_ui_settings'}),
# users/me/alert_words -> zerver.views.alert_words
url(r'^users/me/alert_words$', rest_dispatch,
{'GET': 'zerver.views.alert_words.list_alert_words',
'POST': 'zerver.views.alert_words.set_alert_words',
'PUT': 'zerver.views.alert_words.add_alert_words',
'DELETE': 'zerver.views.alert_words.remove_alert_words'}),
url(r'^users/me/(?P<stream_id>\d+)/topics$', rest_dispatch,
{'GET': 'zerver.views.streams.get_topics_backend'}),
# streams -> zerver.views.streams
# (this API is only used externally)
url(r'^streams$', rest_dispatch,
{'GET': 'zerver.views.streams.get_streams_backend'}),
# GET returns `stream_id`, stream name should be encoded in the url query (in `stream` param)
url(r'^get_stream_id', rest_dispatch,
{'GET': 'zerver.views.streams.json_get_stream_id'}),
# GET returns "stream info" (undefined currently?), HEAD returns whether stream exists (200 or 404)
url(r'^streams/(?P<stream_id>\d+)/members$', rest_dispatch,
{'GET': 'zerver.views.streams.get_subscribers_backend'}),
url(r'^streams/(?P<stream_id>\d+)$', rest_dispatch,
{'PATCH': 'zerver.views.streams.update_stream_backend',
'DELETE': 'zerver.views.streams.deactivate_stream_backend'}),
url(r'^default_streams$', rest_dispatch,
{'POST': 'zerver.views.streams.add_default_stream',
'DELETE': 'zerver.views.streams.remove_default_stream'}),
# GET lists your streams, POST bulk adds, PATCH bulk modifies/removes
url(r'^users/me/subscriptions$', rest_dispatch,
{'GET': 'zerver.views.streams.list_subscriptions_backend',
'POST': 'zerver.views.streams.add_subscriptions_backend',
'PATCH': 'zerver.views.streams.update_subscriptions_backend',
'DELETE': 'zerver.views.streams.remove_subscriptions_backend'}),
# muting -> zerver.views.muting
url(r'^users/me/subscriptions/muted_topics$', rest_dispatch,
{'POST': 'zerver.views.muting.set_muted_topics'}),
# used to register for an event queue in tornado
url(r'^register$', rest_dispatch,
{'POST': 'zerver.views.events_register.events_register_backend'}),
# events -> zerver.tornado.views
url(r'^events$', rest_dispatch,
{'GET': 'zerver.tornado.views.get_events_backend',
'DELETE': 'zerver.tornado.views.cleanup_event_queue'}),
]
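# Editor's illustrative sketch (not part of the original URL list): the shape
# of a new authenticated endpoint entry, as described in the comment above
# this list. The path and the dotted view names are hypothetical.
#
#   url(r'^widgets$', rest_dispatch,
#       {'GET': 'zerver.views.widgets.get_widgets_backend',
#        'POST': 'zerver.views.widgets.create_widget_backend'}),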
# Include the dual-use patterns twice
urls += [
url(r'^api/v1/', include(v1_api_and_json_patterns)),
url(r'^json/', include(v1_api_and_json_patterns)),
]
# user_uploads -> zerver.views.upload.serve_file_backend
#
# This url is an exception to the url naming schemes for endpoints. It
# supports both API and session cookie authentication, using a single
# URL for both (not 'api/v1/' or 'json/' prefix). This is required to
# easily support the mobile apps fetching uploaded files without
# having to rewrite URLs, and is implemented using the
# 'override_api_url_scheme' flag passed to rest_dispatch
urls += [
    url(r'^user_uploads/(?P<realm_id_str>(\d*|unk))/(?P<filename>.*)',
        rest_dispatch,
        {'GET': ('zerver.views.upload.serve_file_backend',
                 {'override_api_url_scheme'})}),
]
# Incoming webhook URLs
# We don't create urls for particular git integrations here
# because of generic one below
for incoming_webhook in WEBHOOK_INTEGRATIONS:
if incoming_webhook.url_object:
urls.append(incoming_webhook.url_object)
urls.append(url(r'^api/v1/external/github', github_dispatcher.api_github_webhook_dispatch))
# Mobile-specific authentication URLs
urls += [
# This json format view used by the mobile apps lists which authentication
# backends the server allows, to display the proper UI and check for server existence
url(r'^api/v1/get_auth_backends', zerver.views.auth.api_get_auth_backends, name='zerver.views.auth.api_get_auth_backends'),
# used by mobile apps to check if they are compatible with the server
url(r'^compatibility$', zerver.views.compatibility.check_compatibility),
    # This json format view used by the mobile apps accepts a username/password
    # pair and returns an API key.
url(r'^api/v1/fetch_api_key$', zerver.views.auth.api_fetch_api_key, name='zerver.views.auth.api_fetch_api_key'),
# This is for the signing in through the devAuthBackEnd on mobile apps.
url(r'^api/v1/dev_fetch_api_key$', zerver.views.auth.api_dev_fetch_api_key, name='zerver.views.auth.api_dev_fetch_api_key'),
# This is for fetching the emails of the admins and the users.
url(r'^api/v1/dev_get_emails$', zerver.views.auth.api_dev_get_emails, name='zerver.views.auth.api_dev_get_emails'),
# Used to present the GOOGLE_CLIENT_ID to mobile apps
url(r'^api/v1/fetch_google_client_id$',
zerver.views.auth.api_fetch_google_client_id,
name='zerver.views.auth.api_fetch_google_client_id'),
]
# Include URL configuration files for site-specified extra installed
# Django apps
for app_name in settings.EXTRA_INSTALLED_APPS:
app_dir = os.path.join(settings.DEPLOY_ROOT, app_name)
if os.path.exists(os.path.join(app_dir, 'urls.py')):
urls += [url(r'^', include('%s.urls' % (app_name,)))]
i18n_urls += import_string("{}.urls.i18n_urlpatterns".format(app_name))
# Tornado views
urls += [
# Used internally for communication between Django and Tornado processes
url(r'^notify_tornado$', zerver.tornado.views.notify, name='zerver.tornado.views.notify'),
]
# Python Social Auth
urls += [url(r'^', include('social_django.urls', namespace='social'))]
# User documentation site
urls += [url(r'^help/(?P<article>.*)$', HelpView.as_view(template_name='zerver/help/main.html'))]
if settings.DEVELOPMENT:
urls += dev_urls.urls
i18n_urls += dev_urls.i18n_urls
# The sequence is important; if i18n urls don't come first then
# reverse url mapping points to i18n urls which causes the frontend
# tests to fail
urlpatterns = i18n_patterns(*i18n_urls) + urls + legacy_urls
| {
"content_hash": "de66b8b47085587eba63e9cac70e072b",
"timestamp": "",
"source": "github",
"line_count": 436,
"max_line_length": 136,
"avg_line_length": 50.403669724770644,
"alnum_prop": 0.6827448125227521,
"repo_name": "dattatreya303/zulip",
"id": "23cb8dc6c8e104cfb71dd2f949c4d6d99a13e09c",
"size": "21976",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "zproject/urls.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "298684"
},
{
"name": "Emacs Lisp",
"bytes": "158"
},
{
"name": "GCC Machine Description",
"bytes": "142"
},
{
"name": "Groovy",
"bytes": "5509"
},
{
"name": "HTML",
"bytes": "541221"
},
{
"name": "JavaScript",
"bytes": "1601573"
},
{
"name": "Nginx",
"bytes": "1280"
},
{
"name": "Pascal",
"bytes": "1113"
},
{
"name": "Perl",
"bytes": "401825"
},
{
"name": "Puppet",
"bytes": "86990"
},
{
"name": "Python",
"bytes": "3506780"
},
{
"name": "Ruby",
"bytes": "249744"
},
{
"name": "Shell",
"bytes": "37821"
}
],
"symlink_target": ""
} |
from rgfunc import *
def printEmp(employees):
n = 1
for name in employees:
print n, ": ", name.fullname()
n +=1
br()
def new_emp(Employee,employees):
br()
print "Enter New Employees Details"
    employee = Employee(raw_input("Enter new employee's first name: "))
employee.lastname = raw_input("Enter new employee's last name: ")
employee.city = raw_input("Enter name of city: ")
employee.country = raw_input("Enter name of country: ")
employee.day = get_int("Enter the date of birth: ")
employee.month = get_int("Enter the month of birth: ")
employee.year = get_int("Enter the year of birth: ")
employees.append(employee)
br()
def det_emp(employee):
br()
stline()
print employee.fullname()
stline()
br()
print "First Name: ",employee.name
print "Last Name: ",employee.lastname
print "City: ",employee.city
print "Country: ",employee.country
print "Date of Birth:",employee.dateofbirth()
br()
def vd_emp(employees):
if len(employees) == 0:
br()
print "Empty - nothing to View"
br()
else:
br()
printEmp(employees)
        vd_empl = get_int("Choose employee to view details: ")
br()
det_emp(employees[vd_empl-1])
def list_emp(employees):
if len(employees) == 0:
br()
print "Empty - nothing to View"
br()
else:
br()
printEmp(employees)
def del_emp(employees):
if len(employees) == 0:
br()
print "Empty - nothing to Delete"
br()
else:
printEmp(employees)
stline()
        del_name = raw_input("Which employee do you want to delete: ")
try:
del_name = int(del_name)
del employees[del_name-1]
printEmp(employees)
stline()
br()
except:
br()
print "Invalid Input"
br()
def srch_emp(employees):
listName = []
num = []
br()
sr_name = raw_input("Enter name of employee you want to search: ")
br()
no = 1
for name in employees:
if sr_name.lower() == name.name.lower():
listName.append(name.fullname())
num.append(no)
no +=1
if len(listName) == 0:
br()
print "Nothing Found, Try Again"
br()
else:
n= 1
for name in listName:
print num[n-1] , ": " ,name
n +=1
br()
def edit_emp(employees):
pass
if __name__ == "__main__":
    print "Error - invalid file to run. Please run main.py."
exit()
| {
"content_hash": "56ae39934e03a0685b75d2711f8385a1",
"timestamp": "",
"source": "github",
"line_count": 109,
"max_line_length": 68,
"avg_line_length": 20.24770642201835,
"alnum_prop": 0.6452197553239691,
"repo_name": "imughal/EmployeeScript",
"id": "47426f243ec7bdb955538b91d60d71280b34a3de",
"size": "2207",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "functions.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "5288"
}
],
"symlink_target": ""
} |
from django.conf import settings
from django.db import models
from django.utils.translation import ugettext_lazy as _
from panda.models.base_upload import BaseUpload
class Export(BaseUpload):
"""
A dataset exported to a file.
"""
from panda.models.dataset import Dataset
dataset = models.ForeignKey(Dataset, related_name='exports', null=True,
help_text=_('The dataset this export is from.'),
verbose_name=_('dataset'))
file_root = settings.EXPORT_ROOT
class Meta:
app_label = 'panda'
ordering = ['creation_date']
verbose_name = _('Export')
verbose_name_plural = _('Exports')
| {
"content_hash": "38b36b7950eb64ff09f837c4989491af",
"timestamp": "",
"source": "github",
"line_count": 24,
"max_line_length": 75,
"avg_line_length": 27.375,
"alnum_prop": 0.6621004566210046,
"repo_name": "PalmBeachPost/panda",
"id": "2d8f2788051d879b30b942e824f4d1eef909dc11",
"size": "680",
"binary": false,
"copies": "4",
"ref": "refs/heads/1.2.0",
"path": "panda/models/export.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "14837"
},
{
"name": "HTML",
"bytes": "51564"
},
{
"name": "Java",
"bytes": "256"
},
{
"name": "JavaScript",
"bytes": "759191"
},
{
"name": "Python",
"bytes": "877718"
},
{
"name": "Shell",
"bytes": "17522"
}
],
"symlink_target": ""
} |
'''
Client.Item.* tests.
Note that when using many of these methods in production, it would be necessary
to handle error cases (e.g. ``INVALID_CREDENTIAL``). However, we don't do that
here since any errors will automatically be marked as failures by the test
runner.
'''
import json
from contextlib import contextmanager
import plaid
from plaid.model.products import Products
from plaid.model.sandbox_public_token_create_request import SandboxPublicTokenCreateRequest
from plaid.model.item_public_token_exchange_request import ItemPublicTokenExchangeRequest
from plaid.model.item_remove_request import ItemRemoveRequest
from plaid.model.item_get_request import ItemGetRequest
from plaid.model.item_import_request import ItemImportRequest
from plaid.model.item_import_request_user_auth import ItemImportRequestUserAuth
from plaid.model.sandbox_item_fire_webhook_request import SandboxItemFireWebhookRequest
from plaid.model.item_access_token_invalidate_request import ItemAccessTokenInvalidateRequest
from plaid.model.item_webhook_update_request import ItemWebhookUpdateRequest
from plaid.model.sandbox_public_token_create_request_options import SandboxPublicTokenCreateRequestOptions
from tests.integration.util import (
create_client,
SANDBOX_INSTITUTION,
)
# Ensure that any items created are also removed
@contextmanager
def ensure_item_removed(access_token):
try:
yield
finally:
request = ItemRemoveRequest(
access_token=access_token
)
client = create_client()
client.item_remove(request)
def test_get():
client = create_client()
pt_request = SandboxPublicTokenCreateRequest(
institution_id=SANDBOX_INSTITUTION,
initial_products=[Products('transactions')]
)
pt_response = client.sandbox_public_token_create(pt_request)
exchange_request = ItemPublicTokenExchangeRequest(
public_token=pt_response['public_token']
)
exchange_response = client.item_public_token_exchange(exchange_request)
with ensure_item_removed(exchange_response['access_token']):
get_request = ItemGetRequest(
access_token=exchange_response['access_token']
)
get_response = client.item_get(get_request)
assert get_response['item'] is not None
def test_remove():
client = create_client()
pt_request = SandboxPublicTokenCreateRequest(
institution_id=SANDBOX_INSTITUTION,
initial_products=[Products('transactions')]
)
pt_response = client.sandbox_public_token_create(pt_request)
exchange_request = ItemPublicTokenExchangeRequest(
public_token=pt_response['public_token']
)
exchange_response = client.item_public_token_exchange(exchange_request)
ir_request = ItemRemoveRequest(
access_token=exchange_response['access_token']
)
remove_response = client.item_remove(ir_request)
assert remove_response['request_id']
try:
ir_request = ItemRemoveRequest(
access_token=exchange_response['access_token']
)
client.item_remove(ir_request)
except plaid.ApiException as e:
response = json.loads(e.body)
assert response['error_code'] == 'ITEM_NOT_FOUND'
def test_import():
client = create_client()
at_request = ItemImportRequest(
products=[Products('identity'), Products('auth')],
user_auth=ItemImportRequestUserAuth(
user_id='user_good',
auth_token='pass_good'
)
)
at_response = client.item_import(at_request)
assert at_response['access_token'] is not None
def test_public_token():
client = create_client()
pt_request = SandboxPublicTokenCreateRequest(
institution_id=SANDBOX_INSTITUTION,
initial_products=[Products('transactions')]
)
pt_response = client.sandbox_public_token_create(pt_request)
exchange_request = ItemPublicTokenExchangeRequest(
public_token=pt_response['public_token']
)
exchange_response = client.item_public_token_exchange(exchange_request)
with ensure_item_removed(exchange_response['access_token']):
assert pt_response['public_token'] is not None
assert exchange_response['access_token'] is not None
def test_sandbox_public_token():
client = create_client()
pt_request = SandboxPublicTokenCreateRequest(
institution_id=SANDBOX_INSTITUTION,
initial_products=[Products('transactions')]
)
pt_response = client.sandbox_public_token_create(pt_request)
assert pt_response['public_token'] is not None
# public token -> access token
exchange_request = ItemPublicTokenExchangeRequest(
public_token=pt_response['public_token']
)
exchange_response = client.item_public_token_exchange(exchange_request)
assert exchange_response['access_token'] is not None
def test_sandbox_fire_webhook():
client = create_client()
pt_request = SandboxPublicTokenCreateRequest(
institution_id=SANDBOX_INSTITUTION,
initial_products=[Products('transactions')],
options=SandboxPublicTokenCreateRequestOptions(
webhook='https://plaid.com/foo/bar/hook'
)
)
pt_response = client.sandbox_public_token_create(pt_request)
assert pt_response['public_token'] is not None
# public token -> access token
exchange_request = ItemPublicTokenExchangeRequest(
public_token=pt_response['public_token']
)
exchange_response = client.item_public_token_exchange(exchange_request)
assert exchange_response['access_token'] is not None
# fire webhook
fire_request = SandboxItemFireWebhookRequest(
access_token=exchange_response['access_token'],
webhook_code='DEFAULT_UPDATE'
)
fire_webhook_response = client.sandbox_item_fire_webhook(fire_request)
assert fire_webhook_response['webhook_fired'] is True
def test_access_token_invalidate():
client = create_client()
pt_request = SandboxPublicTokenCreateRequest(
institution_id=SANDBOX_INSTITUTION,
initial_products=[Products('transactions')]
)
pt_response = client.sandbox_public_token_create(pt_request)
exchange_request = ItemPublicTokenExchangeRequest(
public_token=pt_response['public_token']
)
exchange_response = client.item_public_token_exchange(exchange_request)
try:
invalidate_request = ItemAccessTokenInvalidateRequest(
access_token=exchange_response['access_token']
)
invalidate_response = client.item_access_token_invalidate(
invalidate_request)
with ensure_item_removed(invalidate_response['new_access_token']):
assert invalidate_response['new_access_token'] is not None
except Exception:
with ensure_item_removed(exchange_response['access_token']):
raise
def test_webhook_update():
client = create_client()
pt_request = SandboxPublicTokenCreateRequest(
institution_id=SANDBOX_INSTITUTION,
initial_products=[Products('transactions')]
)
pt_response = client.sandbox_public_token_create(pt_request)
exchange_request = ItemPublicTokenExchangeRequest(
public_token=pt_response['public_token']
)
exchange_response = client.item_public_token_exchange(exchange_request)
with ensure_item_removed(exchange_response['access_token']):
webhook_request = ItemWebhookUpdateRequest(
access_token=exchange_response['access_token'],
webhook='https://plaid.com/webhook-test'
)
webhook_response = client.item_webhook_update(webhook_request)
assert (webhook_response['item']['webhook'] ==
'https://plaid.com/webhook-test')
| {
"content_hash": "ba3a9188740002dbbb6f5b97ff1a6bb1",
"timestamp": "",
"source": "github",
"line_count": 232,
"max_line_length": 106,
"avg_line_length": 33.400862068965516,
"alnum_prop": 0.7065427797135114,
"repo_name": "plaid/plaid-python",
"id": "3caabacc445e4260870b0804b3c0a3f831e332df",
"size": "7749",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/integration/test_item.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Dockerfile",
"bytes": "323"
},
{
"name": "Makefile",
"bytes": "622"
},
{
"name": "Mustache",
"bytes": "125163"
},
{
"name": "Python",
"bytes": "9342874"
}
],
"symlink_target": ""
} |
import doctest
import unittest
from mptt.tests import doctests
from mptt.tests import testcases
def suite():
s = unittest.TestSuite()
s.addTest(doctest.DocTestSuite(doctests))
s.addTest(unittest.defaultTestLoader.loadTestsFromModule(testcases))
return s
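# Editor's note (illustrative, not part of the original module): the suite can
# be run directly with the standard library runner, e.g.
#   import unittest
#   unittest.TextTestRunner(verbosity=2).run(suite())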
| {
"content_hash": "f8b75057c3053bf24f4d50a27b453f30",
"timestamp": "",
"source": "github",
"line_count": 11,
"max_line_length": 72,
"avg_line_length": 24.727272727272727,
"alnum_prop": 0.7757352941176471,
"repo_name": "emiquelito/django-cms-2.0",
"id": "b89a53b567f9bfe655281ad119b2e33c4e0161ed",
"size": "272",
"binary": false,
"copies": "11",
"ref": "refs/heads/master",
"path": "mptt/tests/tests.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "JavaScript",
"bytes": "711305"
},
{
"name": "PHP",
"bytes": "1052"
},
{
"name": "Python",
"bytes": "1196111"
}
],
"symlink_target": ""
} |
def CreateDataLoader(datafolder,dataroot='./dataset',dataset_mode='2afc',load_size=64,batch_size=1,serial_batches=True,nThreads=4):
from data.custom_dataset_data_loader import CustomDatasetDataLoader
data_loader = CustomDatasetDataLoader()
# print(data_loader.name())
data_loader.initialize(datafolder,dataroot=dataroot+'/'+dataset_mode,dataset_mode=dataset_mode,load_size=load_size,batch_size=batch_size,serial_batches=serial_batches, nThreads=nThreads)
return data_loader
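def _example_create_data_loader():
    # Hypothetical usage sketch: the folder name and options below are illustrative, and it
    # assumes the returned CustomDatasetDataLoader exposes a load_data() iterator.
    loader = CreateDataLoader('val/traditional', dataset_mode='2afc', batch_size=50)
    for batch in loader.load_data():
        pass  # each batch is produced by the 2afc dataset under dataroot/dataset_mode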
| {
"content_hash": "3a7ea40dd87ea7d3e5a1a686bd2795bc",
"timestamp": "",
"source": "github",
"line_count": 6,
"max_line_length": 190,
"avg_line_length": 82.33333333333333,
"alnum_prop": 0.7854251012145749,
"repo_name": "richzhang/PerceptualSimilarity",
"id": "db989c18717e69af5d672a979721e2fe2b4ffba4",
"size": "494",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "data/data_loader.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Dockerfile",
"bytes": "1671"
},
{
"name": "Python",
"bytes": "69306"
},
{
"name": "Shell",
"bytes": "2294"
}
],
"symlink_target": ""
} |
from os.path import dirname, join
BASE_DIR = dirname(dirname(__file__))
DEBUG = True
TEMPLATE_DEBUG = DEBUG
ADMINS = (
# ('Your Name', '[email protected]'),
)
MANAGERS = ADMINS
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3', # Add 'postgresql_psycopg2', 'mysql', 'sqlite3' or 'oracle'.
'NAME': join(BASE_DIR, 'db.sqlite3'), # Or path to database file if using sqlite3.
# The following settings are not used with sqlite3:
'USER': '',
'PASSWORD': '',
'HOST': '', # Empty for localhost through domain sockets or '127.0.0.1' for localhost through TCP.
'PORT': '', # Set to empty string for default.
}
}
# Hosts/domain names that are valid for this site; required if DEBUG is False
# See https://docs.djangoproject.com/en/1.5/ref/settings/#allowed-hosts
ALLOWED_HOSTS = []
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# In a Windows environment this must be set to your system time zone.
TIME_ZONE = 'UTC'
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en-us'
SITE_ID = 1
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
# If you set this to False, Django will not format dates, numbers and
# calendars according to the current locale.
USE_L10N = True
# If you set this to False, Django will not use timezone-aware datetimes.
USE_TZ = True
# Absolute filesystem path to the directory that will hold user-uploaded files.
# Example: "/var/www/example.com/media/"
MEDIA_ROOT = ''
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash.
# Examples: "http://example.com/media/", "http://media.example.com/"
MEDIA_URL = ''
# Absolute path to the directory static files should be collected to.
# Don't put anything in this directory yourself; store your static files
# in apps' "static/" subdirectories and in STATICFILES_DIRS.
# Example: "/var/www/example.com/static/"
STATIC_ROOT = ''
# URL prefix for static files.
# Example: "http://example.com/static/", "http://static.example.com/"
STATIC_URL = 'https://dn-django-qiniu-storage.qbox.me/static/'
# Additional locations of static files
STATICFILES_DIRS = (
# Put strings here, like "/home/html/static" or "C:/www/django/static".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
)
# List of finder classes that know how to find static files in
# various locations.
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
# 'django.contrib.staticfiles.finders.DefaultStorageFinder',
)
# Make this unique, and don't share it with anybody.
SECRET_KEY = '9=mp)3*)4=bf_)-rf^4&34shdwqsmtn%bh#!lw^s$1i=#c4s&@'
# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
# 'django.template.loaders.eggs.Loader',
)
MIDDLEWARE_CLASSES = (
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
# Uncomment the next line for simple clickjacking protection:
# 'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'demo_site.urls'
# Python dotted path to the WSGI application used by Django's runserver.
WSGI_APPLICATION = 'demo_site.wsgi.application'
TEMPLATE_DIRS = (
# Put strings here, like "/home/html/django_templates" or "C:/www/django/templates".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
)
INSTALLED_APPS = (
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.messages',
'django.contrib.staticfiles',
'qiniustorage',
'foo',
'bar',
# Uncomment the next line to enable the admin:
'django.contrib.admin',
# Uncomment the next line to enable admin documentation:
# 'django.contrib.admindocs',
)
# A sample logging configuration. The only tangible logging
# performed by this configuration is to send an email to
# the site admins on every HTTP 500 error when DEBUG=False.
# See http://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'filters': {
'require_debug_false': {
'()': 'django.utils.log.RequireDebugFalse'
}
},
'handlers': {
'mail_admins': {
'level': 'ERROR',
'filters': ['require_debug_false'],
'class': 'django.utils.log.AdminEmailHandler'
}
},
'loggers': {
'django.request': {
'handlers': ['mail_admins'],
'level': 'ERROR',
'propagate': True,
},
}
}
# auth
AUTHENTICATION_BACKENDS = (
'django.contrib.auth.backends.ModelBackend',
'demo_site.auth.SettingsBackend',
)
ADMIN_LOGIN = 'admin'
ADMIN_PASSWORD = 'admin'
DEFAULT_FILE_STORAGE = 'qiniustorage.backends.QiniuMediaStorage'
STATICFILES_STORAGE = 'qiniustorage.backends.QiniuStaticStorage'
QINIU_SECURE_URL = False
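# Illustrative note (not part of the original settings): with the two storage backends
# configured above, uploads and collected static files go through Qiniu via Django's
# standard storage API, e.g. (hypothetical snippet):
#
#     from django.core.files.base import ContentFile
#     from django.core.files.storage import default_storage
#     path = default_storage.save('demo.txt', ContentFile(b'hello qiniu'))
#     url = default_storage.url(path)  # resolves against the Qiniu bucket domain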
| {
"content_hash": "438b361762700153e62af283661c74c3",
"timestamp": "",
"source": "github",
"line_count": 176,
"max_line_length": 127,
"avg_line_length": 33.07954545454545,
"alnum_prop": 0.6884232222603917,
"repo_name": "glasslion/django-qiniu-storage",
"id": "65a84e6b539f6f3b2bdc9f94aaa645b6c6a4ad02",
"size": "5863",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "demo_site/demo_site/settings.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "1665"
},
{
"name": "Python",
"bytes": "34599"
}
],
"symlink_target": ""
} |
import re
from neutron.agent.linux import ip_lib
from neutron.agent.linux import utils
from neutron.openstack.common import jsonutils
from neutron.openstack.common import log as logging
from neutron.plugins.common import constants as p_const
# TODO(JLH) Should we remove the explicit include of the ovs plugin here
from neutron.plugins.openvswitch.common import constants
LOG = logging.getLogger(__name__)
class VifPort:
def __init__(self, port_name, ofport, vif_id, vif_mac, switch):
self.port_name = port_name
self.ofport = ofport
self.vif_id = vif_id
self.vif_mac = vif_mac
self.switch = switch
def __str__(self):
return ("iface-id=" + self.vif_id + ", vif_mac=" +
self.vif_mac + ", port_name=" + self.port_name +
", ofport=" + str(self.ofport) + ", bridge_name =" +
self.switch.br_name)
class BaseOVS(object):
def __init__(self, root_helper):
self.root_helper = root_helper
def run_vsctl(self, args, check_error=False):
full_args = ["ovs-vsctl", "--timeout=2"] + args
try:
return utils.execute(full_args, root_helper=self.root_helper)
except Exception as e:
LOG.error(_("Unable to execute %(cmd)s. Exception: %(exception)s"),
{'cmd': full_args, 'exception': e})
if check_error:
raise
def add_bridge(self, bridge_name):
self.run_vsctl(["--", "--may-exist", "add-br", bridge_name])
return OVSBridge(bridge_name, self.root_helper)
def delete_bridge(self, bridge_name):
self.run_vsctl(["--", "--if-exists", "del-br", bridge_name])
def bridge_exists(self, bridge_name):
try:
self.run_vsctl(['br-exists', bridge_name], check_error=True)
except RuntimeError as e:
if 'Exit code: 2\n' in str(e):
return False
raise
return True
def get_bridge_name_for_port_name(self, port_name):
try:
return self.run_vsctl(['port-to-br', port_name], check_error=True)
except RuntimeError as e:
if 'Exit code: 1\n' not in str(e):
raise
def port_exists(self, port_name):
return bool(self.get_bridge_name_for_port_name(port_name))
class OVSBridge(BaseOVS):
def __init__(self, br_name, root_helper):
super(OVSBridge, self).__init__(root_helper)
self.br_name = br_name
self.re_id = self.re_compile_id()
self.defer_apply_flows = False
self.deferred_flows = {'add': '', 'mod': '', 'del': ''}
def re_compile_id(self):
        external = r'external_ids\s*'
        mac = r'attached-mac="(?P<vif_mac>([a-fA-F\d]{2}:){5}([a-fA-F\d]{2}))"'
        iface = r'iface-id="(?P<vif_id>[^"]+)"'
        name = r'name\s*:\s"(?P<port_name>[^"]*)"'
        port = r'ofport\s*:\s(?P<ofport>-?\d+)'
        _re = (r'%(external)s:\s{ ( %(mac)s,? | %(iface)s,? | . )* }'
               r' \s+ %(name)s \s+ %(port)s' % {'external': external,
                                                'mac': mac,
                                                'iface': iface, 'name': name,
                                                'port': port})
return re.compile(_re, re.M | re.X)
def create(self):
self.add_bridge(self.br_name)
def destroy(self):
self.delete_bridge(self.br_name)
def reset_bridge(self):
self.destroy()
self.create()
def add_port(self, port_name):
self.run_vsctl(["--", "--may-exist", "add-port", self.br_name,
port_name])
return self.get_port_ofport(port_name)
def delete_port(self, port_name):
self.run_vsctl(["--", "--if-exists", "del-port", self.br_name,
port_name])
def set_db_attribute(self, table_name, record, column, value):
args = ["set", table_name, record, "%s=%s" % (column, value)]
self.run_vsctl(args)
def clear_db_attribute(self, table_name, record, column):
args = ["clear", table_name, record, column]
self.run_vsctl(args)
def run_ofctl(self, cmd, args, process_input=None):
full_args = ["ovs-ofctl", cmd, self.br_name] + args
try:
return utils.execute(full_args, root_helper=self.root_helper,
process_input=process_input)
except Exception as e:
LOG.error(_("Unable to execute %(cmd)s. Exception: %(exception)s"),
{'cmd': full_args, 'exception': e})
def count_flows(self):
flow_list = self.run_ofctl("dump-flows", []).split("\n")[1:]
return len(flow_list) - 1
def remove_all_flows(self):
self.run_ofctl("del-flows", [])
def get_port_ofport(self, port_name):
return self.db_get_val("Interface", port_name, "ofport")
def get_datapath_id(self):
return self.db_get_val('Bridge',
self.br_name, 'datapath_id').strip('"')
def _build_flow_expr_arr(self, **kwargs):
flow_expr_arr = []
is_delete_expr = kwargs.get('delete', False)
if not is_delete_expr:
prefix = ("hard_timeout=%s,idle_timeout=%s,priority=%s" %
(kwargs.get('hard_timeout', '0'),
kwargs.get('idle_timeout', '0'),
kwargs.get('priority', '1')))
flow_expr_arr.append(prefix)
elif 'priority' in kwargs:
raise Exception(_("Cannot match priority on flow deletion"))
table = ('table' in kwargs and ",table=%s" %
kwargs['table'] or '')
in_port = ('in_port' in kwargs and ",in_port=%s" %
kwargs['in_port'] or '')
dl_type = ('dl_type' in kwargs and ",dl_type=%s" %
kwargs['dl_type'] or '')
dl_vlan = ('dl_vlan' in kwargs and ",dl_vlan=%s" %
kwargs['dl_vlan'] or '')
dl_src = 'dl_src' in kwargs and ",dl_src=%s" % kwargs['dl_src'] or ''
dl_dst = 'dl_dst' in kwargs and ",dl_dst=%s" % kwargs['dl_dst'] or ''
nw_src = 'nw_src' in kwargs and ",nw_src=%s" % kwargs['nw_src'] or ''
nw_dst = 'nw_dst' in kwargs and ",nw_dst=%s" % kwargs['nw_dst'] or ''
tun_id = 'tun_id' in kwargs and ",tun_id=%s" % kwargs['tun_id'] or ''
proto = 'proto' in kwargs and ",%s" % kwargs['proto'] or ''
ip = ('nw_src' in kwargs or 'nw_dst' in kwargs) and ',ip' or ''
match = (table + in_port + dl_type + dl_vlan + dl_src + dl_dst +
(proto or ip) + nw_src + nw_dst + tun_id)
if match:
match = match[1:] # strip leading comma
flow_expr_arr.append(match)
return flow_expr_arr
def add_or_mod_flow_str(self, **kwargs):
if "actions" not in kwargs:
raise Exception(_("Must specify one or more actions"))
if "priority" not in kwargs:
kwargs["priority"] = "0"
flow_expr_arr = self._build_flow_expr_arr(**kwargs)
flow_expr_arr.append("actions=%s" % (kwargs["actions"]))
flow_str = ",".join(flow_expr_arr)
return flow_str
def add_flow(self, **kwargs):
flow_str = self.add_or_mod_flow_str(**kwargs)
if self.defer_apply_flows:
self.deferred_flows['add'] += flow_str + '\n'
else:
self.run_ofctl("add-flow", [flow_str])
def mod_flow(self, **kwargs):
flow_str = self.add_or_mod_flow_str(**kwargs)
if self.defer_apply_flows:
self.deferred_flows['mod'] += flow_str + '\n'
else:
self.run_ofctl("mod-flows", [flow_str])
def delete_flows(self, **kwargs):
kwargs['delete'] = True
flow_expr_arr = self._build_flow_expr_arr(**kwargs)
if "actions" in kwargs:
flow_expr_arr.append("actions=%s" % (kwargs["actions"]))
flow_str = ",".join(flow_expr_arr)
if self.defer_apply_flows:
self.deferred_flows['del'] += flow_str + '\n'
else:
self.run_ofctl("del-flows", [flow_str])
def defer_apply_on(self):
LOG.debug(_('defer_apply_on'))
self.defer_apply_flows = True
def defer_apply_off(self):
LOG.debug(_('defer_apply_off'))
for action, flows in self.deferred_flows.items():
if flows:
LOG.debug(_('Applying following deferred flows '
'to bridge %s'), self.br_name)
for line in flows.splitlines():
LOG.debug(_('%(action)s: %(flow)s'),
{'action': action, 'flow': line})
self.run_ofctl('%s-flows' % action, ['-'], flows)
self.defer_apply_flows = False
self.deferred_flows = {'add': '', 'mod': '', 'del': ''}
def add_tunnel_port(self, port_name, remote_ip, local_ip,
tunnel_type=p_const.TYPE_GRE,
vxlan_udp_port=constants.VXLAN_UDP_PORT):
vsctl_command = ["--", "--may-exist", "add-port", self.br_name,
port_name]
vsctl_command.extend(["--", "set", "Interface", port_name,
"type=%s" % tunnel_type])
if tunnel_type == p_const.TYPE_VXLAN:
# Only set the VXLAN UDP port if it's not the default
if vxlan_udp_port != constants.VXLAN_UDP_PORT:
vsctl_command.append("options:dst_port=%s" % vxlan_udp_port)
vsctl_command.extend(["options:remote_ip=%s" % remote_ip,
"options:local_ip=%s" % local_ip,
"options:in_key=flow",
"options:out_key=flow"])
self.run_vsctl(vsctl_command)
return self.get_port_ofport(port_name)
def add_patch_port(self, local_name, remote_name):
self.run_vsctl(["add-port", self.br_name, local_name,
"--", "set", "Interface", local_name,
"type=patch", "options:peer=%s" % remote_name])
return self.get_port_ofport(local_name)
def db_get_map(self, table, record, column):
output = self.run_vsctl(["get", table, record, column])
if output:
output_str = output.rstrip("\n\r")
return self.db_str_to_map(output_str)
return {}
def db_get_val(self, table, record, column):
output = self.run_vsctl(["get", table, record, column])
if output:
return output.rstrip("\n\r")
def db_str_to_map(self, full_str):
        elements = full_str.strip("{}").split(", ")
        ret = {}
        for e in elements:
if e.find("=") == -1:
continue
arr = e.split("=")
ret[arr[0]] = arr[1].strip("\"")
return ret
def get_port_name_list(self):
res = self.run_vsctl(["list-ports", self.br_name])
if res:
return res.strip().split("\n")
return []
def get_port_stats(self, port_name):
return self.db_get_map("Interface", port_name, "statistics")
def get_xapi_iface_id(self, xs_vif_uuid):
args = ["xe", "vif-param-get", "param-name=other-config",
"param-key=nicira-iface-id", "uuid=%s" % xs_vif_uuid]
try:
return utils.execute(args, root_helper=self.root_helper).strip()
except Exception as e:
LOG.error(_("Unable to execute %(cmd)s. Exception: %(exception)s"),
{'cmd': args, 'exception': e})
# returns a VIF object for each VIF port
def get_vif_ports(self):
edge_ports = []
port_names = self.get_port_name_list()
for name in port_names:
external_ids = self.db_get_map("Interface", name, "external_ids")
ofport = self.db_get_val("Interface", name, "ofport")
if "iface-id" in external_ids and "attached-mac" in external_ids:
p = VifPort(name, ofport, external_ids["iface-id"],
external_ids["attached-mac"], self)
edge_ports.append(p)
elif ("xs-vif-uuid" in external_ids and
"attached-mac" in external_ids):
# if this is a xenserver and iface-id is not automatically
# synced to OVS from XAPI, we grab it from XAPI directly
iface_id = self.get_xapi_iface_id(external_ids["xs-vif-uuid"])
p = VifPort(name, ofport, iface_id,
external_ids["attached-mac"], self)
edge_ports.append(p)
return edge_ports
def get_vif_port_set(self):
port_names = self.get_port_name_list()
edge_ports = set()
args = ['--format=json', '--', '--columns=name,external_ids',
'list', 'Interface']
result = self.run_vsctl(args)
if not result:
return edge_ports
for row in jsonutils.loads(result)['data']:
name = row[0]
if name not in port_names:
continue
external_ids = dict(row[1][1])
if "iface-id" in external_ids and "attached-mac" in external_ids:
edge_ports.add(external_ids['iface-id'])
elif ("xs-vif-uuid" in external_ids and
"attached-mac" in external_ids):
# if this is a xenserver and iface-id is not automatically
# synced to OVS from XAPI, we grab it from XAPI directly
iface_id = self.get_xapi_iface_id(external_ids["xs-vif-uuid"])
edge_ports.add(iface_id)
return edge_ports
def get_vif_port_by_id(self, port_id):
args = ['--', '--columns=external_ids,name,ofport',
'find', 'Interface',
'external_ids:iface-id="%s"' % port_id]
result = self.run_vsctl(args)
if not result:
return
match = self.re_id.search(result)
try:
vif_mac = match.group('vif_mac')
vif_id = match.group('vif_id')
port_name = match.group('port_name')
ofport = int(match.group('ofport'))
return VifPort(port_name, ofport, vif_id, vif_mac, self)
except Exception as e:
LOG.info(_("Unable to parse regex results. Exception: %s"), e)
return
def delete_ports(self, all_ports=False):
if all_ports:
port_names = self.get_port_name_list()
else:
port_names = (port.port_name for port in self.get_vif_ports())
for port_name in port_names:
self.delete_port(port_name)
def get_local_port_mac(self):
"""Retrieve the mac of the bridge's local port."""
address = ip_lib.IPDevice(self.br_name, self.root_helper).link.address
if address:
return address
else:
msg = _('Unable to determine mac address for %s') % self.br_name
raise Exception(msg)
def get_bridge_for_iface(root_helper, iface):
args = ["ovs-vsctl", "--timeout=2", "iface-to-br", iface]
try:
return utils.execute(args, root_helper=root_helper).strip()
except Exception:
LOG.exception(_("Interface %s not found."), iface)
return None
def get_bridges(root_helper):
args = ["ovs-vsctl", "--timeout=2", "list-br"]
try:
return utils.execute(args, root_helper=root_helper).strip().split("\n")
except Exception as e:
LOG.exception(_("Unable to retrieve bridges. Exception: %s"), e)
return []
def get_installed_ovs_usr_version(root_helper):
args = ["ovs-vsctl", "--version"]
try:
cmd = utils.execute(args, root_helper=root_helper)
ver = re.findall("\d+\.\d+", cmd)[0]
return ver
except Exception:
LOG.exception(_("Unable to retrieve OVS userspace version."))
def get_installed_ovs_klm_version():
args = ["modinfo", "openvswitch"]
try:
cmd = utils.execute(args)
for line in cmd.split('\n'):
            if 'version: ' in line and 'srcversion' not in line:
ver = re.findall("\d+\.\d+", line)
return ver[0]
except Exception:
LOG.exception(_("Unable to retrieve OVS kernel module version."))
def get_bridge_external_bridge_id(root_helper, bridge):
args = ["ovs-vsctl", "--timeout=2", "br-get-external-id",
bridge, "bridge-id"]
try:
return utils.execute(args, root_helper=root_helper).strip()
except Exception:
LOG.exception(_("Bridge %s not found."), bridge)
return None
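def _example_ovs_bridge_usage():
    # Hypothetical usage sketch of the wrappers above; the bridge/port names and the root
    # helper are illustrative, and the ovs-vsctl/ovs-ofctl commands run only when called.
    ovs = BaseOVS('sudo')
    br = ovs.add_bridge('br-demo')           # ovs-vsctl --may-exist add-br br-demo
    ofport = br.add_port('tap-demo')         # returns the OpenFlow port number
    br.add_flow(priority=2, in_port=ofport, actions='normal')
    br.delete_flows(in_port=ofport)
    ovs.delete_bridge('br-demo')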
| {
"content_hash": "f6e1e021ed32247b93901ebf014db052",
"timestamp": "",
"source": "github",
"line_count": 422,
"max_line_length": 79,
"avg_line_length": 39.3696682464455,
"alnum_prop": 0.5347297459973517,
"repo_name": "ntt-sic/neutron",
"id": "b6fcf9270142c3d272ca1e58ee6d8bb344ce0bd1",
"size": "17481",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "neutron/agent/linux/ovs_lib.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "37307"
},
{
"name": "JavaScript",
"bytes": "67930"
},
{
"name": "Python",
"bytes": "7243854"
},
{
"name": "Shell",
"bytes": "8983"
},
{
"name": "XSLT",
"bytes": "50907"
}
],
"symlink_target": ""
} |
"""
(C) Copyright 2011, 10gen
This is a label on a mattress. Do not modify this file!
"""
# Python
import socket, threading, time
muninAgentVersion = "1.5.7"
def containsStr( val, query ):
""" Returns true if the value is contained in the string """
return val.find( query ) > -1
class MuninThread( threading.Thread ):
""" Pull them munin data from the various hosts. """
def __init__( self, hostname, mmsAgent ):
""" Initialize the object """
self.hostname = hostname
self.mmsAgent = mmsAgent
self.logger = mmsAgent.logger
self.muninNode = MuninNode( self.hostname )
self.running = True
threading.Thread.__init__( self )
def run( self ):
""" Pull the munin data from the various hosts. """
self.logger.info( 'starting munin monitoring: ' + self.hostname + ':4949' )
sleepTime = ( self.mmsAgent.collectionInterval / 2 ) - 1
if ( sleepTime < 1 ):
sleepTime = 1
while not self.mmsAgent.done and self.mmsAgent.hasUniqueServer( self.hostname ) and self.running:
try:
time.sleep( sleepTime )
self._collectAndSetState()
except:
pass
self.logger.info( 'stopping munin monitoring: ' + self.hostname + ':4949' )
def stopThread( self ):
""" Stop the thread. This sets a running flag to false """
self.running = False
def _collectAndSetState( self ):
""" Collect the data and set the state """
muninStats = self._collectStats()
if muninStats is None:
return
muninStats['host'] = self.hostname
self.mmsAgent.setMuninHostState( self.hostname, muninStats )
def _collectStats( self ):
""" Collect the data from the munin host """
try:
return self.muninNode.fetchAndConfigMany( [ "cpu" , "iostat" , "iostat_ios" ] )
except:
return None
class MuninNode( object ):
""" The Munin node collection object """
def __init__( self, host='127.0.0.1', port=4949 ):
""" Constructor """
self.host = host
self.port = port
def _send( self, cmd, sock ):
""" Send a command to Munin """
sock.send( cmd + "\r\n" )
def _readline( self, f ):
""" Read data from vendor.munin """
return f.readline().split("\n")[0]
def list( self, sock, f ):
""" Run a list operation """
self._send( 'list', sock )
s = self._readline( f )
return s.split( ' ' )
def config( self, cmd, sock, f ):
""" Run a config operation """
return self._data( 'config', cmd, sock, f )
def fetch( self, cmd, sock, f ):
""" Run a fetch operation """
return self._data( 'fetch', cmd, sock, f )
def _data( self, cmdType, cmd, sock, f ):
""" Collect data """
self._send( cmdType + ' ' + cmd, sock )
data = []
while True:
s = self._readline( f )
if s == ".":
break
if cmdType == 'config':
if containsStr( s, '.label' ) == False:
continue
data.append( s )
return data
def connect( self ):
""" Connect to the Munin node """
sock = socket.socket( socket.AF_INET, socket.SOCK_STREAM )
sock.connect( ( self.host, self.port ) )
f = sock.makefile()
if not f:
try:
sock.close()
except:
pass
raise Exception( 'Error reading data from socket' )
banner = f.readline() # banner
if len( banner ) == 0:
raise Exception( 'Unable to connect to Munin' )
return ( sock, f )
def disconnect( self, sock, f ):
""" Disconnect from vendor.munin """
try:
try:
self._send( 'quit', sock )
finally:
sock.close()
finally:
if f is not None:
f.close()
def fetchAndConfigMany( self, cmdTypes ):
""" The fetch and config many cmds - opens and closes the connection """
sock = None
f = None
try:
sock, f = self.connect()
fetch = {}
config = {}
for t in cmdTypes:
fetch[t] = self.fetch( t, sock, f )
if ( t == 'cpu' ):
config[t] = { }
else:
config[t] = self.config( t, sock, f )
return { 'fetch' : fetch, 'config' : config }
finally:
try:
self.disconnect( sock, f )
except:
pass
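def _example_munin_fetch():
    # Hypothetical usage sketch; the host is illustrative and network I/O happens only when
    # this function is called.
    node = MuninNode(host='127.0.0.1')
    stats = node.fetchAndConfigMany(['cpu', 'iostat'])
    return stats  # shaped as {'fetch': {...}, 'config': {...}} by fetchAndConfigMany above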
| {
"content_hash": "f2b7daf834d56f532480271526e7cbe9",
"timestamp": "",
"source": "github",
"line_count": 170,
"max_line_length": 105,
"avg_line_length": 27.923529411764704,
"alnum_prop": 0.5072677480514008,
"repo_name": "AlphaCluster/NewsBlur",
"id": "1da32d34e13cc9a57a4cc73933e911b9f1f08c3d",
"size": "4747",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "vendor/mms-agent/munin.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "454"
},
{
"name": "CSS",
"bytes": "720684"
},
{
"name": "CoffeeScript",
"bytes": "9696"
},
{
"name": "Dockerfile",
"bytes": "1331"
},
{
"name": "HTML",
"bytes": "492242"
},
{
"name": "Java",
"bytes": "955691"
},
{
"name": "JavaScript",
"bytes": "1680848"
},
{
"name": "Objective-C",
"bytes": "2591129"
},
{
"name": "Perl",
"bytes": "55598"
},
{
"name": "Python",
"bytes": "2741187"
},
{
"name": "R",
"bytes": "527"
},
{
"name": "Ruby",
"bytes": "870"
},
{
"name": "Shell",
"bytes": "40999"
},
{
"name": "Swift",
"bytes": "3520"
}
],
"symlink_target": ""
} |
import unittest
import chainer
import numpy
import pytest
import chainerx
import chainerx.testing
from chainerx_tests import array_utils
from chainerx_tests import op_utils
def _create_batch_norm_ndarray_args(
xp, device, x_shape, gamma_shape, beta_shape, mean_shape, var_shape,
float_dtype):
x = array_utils.create_dummy_ndarray(xp, x_shape, float_dtype)
# Non-contiguous gamma and beta is not supported by CUDA.
# TODO(hvy): Support non-contiguous gamma and beta with CUDA. Create a
# contiguous copy in the cuDNN wrapper.
pad_gamma_beta = device.backend.name != 'cuda'
gamma = array_utils.create_dummy_ndarray(
xp, gamma_shape, float_dtype, padding=pad_gamma_beta)
beta = array_utils.create_dummy_ndarray(
xp, beta_shape, float_dtype, padding=pad_gamma_beta)
# Non-contiguous running values which are updated in-place are not
# supported by CUDA, so we only pad for other devices.
pad_running = device.backend.name != 'cuda'
mean = array_utils.create_dummy_ndarray(
xp, mean_shape, float_dtype, padding=pad_running)
var = array_utils.create_dummy_ndarray(
xp, var_shape, float_dtype, padding=pad_running, start=0)
# TODO(imanishi): Remove them after supporting random test
x /= x.size
gamma /= gamma.size
beta /= beta.size
mean /= mean.size
var /= var.size
return x, gamma, beta, mean, var
# Note that CUDA (cuDNN) only supports batch normalization with 4 or
# 5-dimensional data. Arrays with smaller dimensions are supported by the
# CUDA backend, while those with larger dimensions are not.
# x_shape,reduced_shape,axis
_batch_norm_params = [
((3, 2), (2,), None),
((5, 4, 3, 2), (4, 3, 2), None),
((5, 4, 3, 2), (4, 3, 2), (0,)),
((5, 4, 3, 2), (4,), (0, 2, 3)),
((5, 4, 3, 2, 2), (4, 3, 2, 2), None),
((5, 4, 3, 2, 2), (4, 3, 2, 2), (0,)),
((5, 4, 3, 2, 2), (4,), (0, 2, 3, 4))
]
# x_shape,gamma_shape,beta_shape,mean_shape,var_shape,axis
_batch_norm_invalid_dimensions_params = [
# Bad reduction, axis defaults to (0,) but should be (0, 2, 3).
((2, 3, 4, 5), (3,), (3,), (3,), (3,), None),
# Bad reduction, axis is () but should be (0, 2, 3).
((2, 3, 4, 5), (3,), (3,), (3,), (3,), ()),
# Bad reduction, axis is (2, 3) but should be (0, 2, 3).
((2, 3, 4, 5), (3,), (3,), (3,), (3,), (2, 3)),
((2, 3, 4, 5), (3, 4), (3,), (3,), (3,), (0, 2, 3)), # Bad gamma shape.
((2, 3, 4, 5), (3,), (3, 4), (3,), (3,), (0, 2, 3)), # Bad beta shape.
((2, 3, 4, 5), (3,), (3,), (3, 4), (3,), (0, 2, 3)), # Bad mean shape.
((2, 3, 4, 5), (3,), (3,), (3,), (3, 4), (0, 2, 3)), # Bad var shape.
]
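def _example_batch_norm_axes():
    # Hypothetical illustration of the axis comments above (not part of the test suite):
    # for NCHW input with per-channel parameters, the reduction must cover the batch and
    # spatial dimensions so the statistics match the parameter shape.
    x = numpy.zeros((2, 3, 4, 5))
    assert x.mean(axis=(0, 2, 3)).shape == (3,)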
@op_utils.op_test(['native:0', 'cuda:0'])
@chainer.testing.parameterize_pytest(
'x_shape,reduced_shape,axis', _batch_norm_params)
@chainer.testing.parameterize_pytest(
'x_dtype', chainerx.testing.float_dtypes)
@chainer.testing.parameterize_pytest(
'param_dtype', chainerx.testing.float_dtypes)
@chainer.testing.parameterize_pytest('eps', [2e-5, 5e-1])
@chainer.testing.parameterize_pytest('decay', [None, 0.5])
@chainer.testing.parameterize_pytest('contiguous', [None, 'C'])
class TestBatchNorm(op_utils.ChainerOpTest):
def setup(self):
reduced_shape = self.reduced_shape
x_dtype = self.x_dtype
param_dtype = self.param_dtype
eps = self.eps
decay = self.decay
axis = self.axis
contiguous = self.contiguous
# - Non-contiguous running values which are updated in-place are not
# supported by CUDA.
# - Non-contiguous gamma and beta is not supported by CUDA.
# TODO(hvy): Support non-contiguous gamma and beta with CUDA. Create a
# contiguous copy in the cuDNN wrapper.
if (chainerx.get_default_device().backend.name == 'cuda'
and contiguous is None):
raise unittest.SkipTest(
'batch_norm with CUDA currently has limited support for '
'non-contiguous inputs.')
# BatchNorm is unstable for fp16 for both native and CUDA.
# TODO(hvy): Fix backward and double backward for fp16.
if x_dtype == 'float16' and param_dtype == 'float16':
self.skip_backward_test = True
self.skip_double_backward_test = True
self.running_mean = numpy.random.uniform(
-1, 1, reduced_shape).astype(param_dtype)
self.running_var = numpy.random.uniform(
0.1, 1, reduced_shape).astype(param_dtype)
optional_args = {}
if eps is not None:
optional_args['eps'] = eps
if decay is not None:
optional_args['decay'] = decay
if axis is not None:
optional_args['axis'] = axis
self.optional_args = optional_args
# TODO(hvy): Fix forward, backward and double backward for fp16.
if x_dtype == 'float16' or param_dtype == 'float16':
self.check_forward_options.update({
'rtol': 1e-1, 'atol': 1e-1})
self.check_backward_options.update({
'rtol': 1e-1, 'atol': 1e-1})
self.check_double_backward_options.update({
'rtol': 1e-1, 'atol': 1e-1})
else:
self.check_forward_options.update({
'rtol': 1e-6, 'atol': 1e-5})
self.check_backward_options.update({
'rtol': 5e-3, 'atol': 5e-4})
self.check_double_backward_options.update({
'rtol': 5e-2, 'atol': 5e-3})
# Running values that are recorded in forward for similarity checks.
self.running_mean_chx = None
self.running_var_chx = None
self.running_mean_ch = None
self.running_var_ch = None
def generate_inputs(self):
x_shape = self.x_shape
reduced_shape = self.reduced_shape
x_dtype = self.x_dtype
param_dtype = self.param_dtype
x = numpy.random.uniform(-1, 1, x_shape).astype(x_dtype)
gamma = numpy.random.uniform(0.5, 1, reduced_shape).astype(param_dtype)
beta = numpy.random.uniform(-1, 1, reduced_shape).astype(param_dtype)
return x, gamma, beta,
def forward_chainerx(self, inputs):
x, gamma, beta = inputs
running_mean = chainerx.array(self.running_mean, copy=True)
running_var = chainerx.array(self.running_var, copy=True)
y = chainerx.batch_norm(
x, gamma, beta, running_mean=running_mean, running_var=running_var,
**self.optional_args)
# Record running values for later checks.
self.running_mean_chx = running_mean
self.running_var_chx = running_var
return y,
def forward_chainer(self, inputs):
x, gamma, beta = inputs
running_mean = self.running_mean.copy()
running_var = self.running_var.copy()
y = chainer.functions.batch_normalization(
x, gamma, beta, running_mean=running_mean, running_var=running_var,
**self.optional_args)
# Record running values for later checks.
self.running_mean_ch = running_mean
self.running_var_ch = running_var
return y,
def check_forward_outputs(self, outputs, expected_outputs):
super().check_forward_outputs(outputs, expected_outputs)
# Check that running values are updated.
if (self.x_dtype == 'float16'
or self.param_dtype == 'float16'):
check_running_options = {'rtol': 1e-1, 'atol': 1e-1}
else:
check_running_options = {'rtol': 1e-6, 'atol': 1e-5}
chainerx.testing.assert_allclose(
self.running_mean_chx, self.running_mean_ch,
**check_running_options)
chainerx.testing.assert_allclose(
self.running_var_chx, self.running_var_ch, **check_running_options)
@pytest.mark.parametrize(
'x_shape,gamma_shape,beta_shape,running_mean_shape,running_var_shape,axis',
_batch_norm_invalid_dimensions_params)
@pytest.mark.parametrize_device(['native:0', 'cuda:0'])
def test_batch_norm_invalid_dimensions(
device, x_shape, gamma_shape, beta_shape, running_mean_shape,
running_var_shape, axis, float_dtype):
x, gamma, beta, running_mean, running_var = (
_create_batch_norm_ndarray_args(
chainerx, device, x_shape, gamma_shape, beta_shape,
running_mean_shape, running_var_shape, float_dtype))
with pytest.raises(chainerx.DimensionError):
chainerx.batch_norm(
x, gamma, beta, running_mean=running_mean, running_var=running_var,
eps=1e-2, decay=0.9, axis=axis)
@op_utils.op_test(['native:0', 'cuda:0'])
@chainer.testing.parameterize_pytest(
'x_shape,reduced_shape,axis', _batch_norm_params)
@chainer.testing.parameterize_pytest(
'x_dtype', chainerx.testing.float_dtypes)
@chainer.testing.parameterize_pytest(
'param_dtype', chainerx.testing.float_dtypes)
@chainer.testing.parameterize_pytest('eps', [None, 3e-5, 1.2])
@chainer.testing.parameterize_pytest('contiguous', [None, 'C'])
class TestFixedBatchNorm(op_utils.ChainerOpTest):
    # Backward and double backward for fixed_batch_norm are not supported yet.
skip_backward_test = True
skip_double_backward_test = True
def setup(self, float_dtype):
x_dtype = self.x_dtype
param_dtype = self.param_dtype
eps = self.eps
axis = self.axis
optional_args = {}
if eps is not None:
optional_args['eps'] = eps
if axis is not None:
optional_args['axis'] = axis
self.optional_args = optional_args
if x_dtype == 'float16' or param_dtype == 'float16':
self.check_forward_options.update({'rtol': 1e-1, 'atol': 1e-1})
else:
self.check_forward_options.update({'rtol': 1e-6, 'atol': 1e-5})
def generate_inputs(self):
x_shape = self.x_shape
reduced_shape = self.reduced_shape
x_dtype = self.x_dtype
param_dtype = self.param_dtype
x = numpy.random.uniform(-1, 1, x_shape).astype(x_dtype)
gamma = numpy.random.uniform(-1, 1, reduced_shape).astype(param_dtype)
beta = numpy.random.uniform(-1, 1, reduced_shape).astype(param_dtype)
mean = numpy.random.uniform(-1, 1, reduced_shape).astype(param_dtype)
var = numpy.random.uniform(0.1, 1, reduced_shape).astype(param_dtype)
return x, gamma, beta, mean, var
def forward_chainerx(self, inputs):
x, gamma, beta, mean, var = inputs
y = chainerx.fixed_batch_norm(
x, gamma, beta, mean=mean, var=var, **self.optional_args)
return y,
def forward_chainer(self, inputs):
x, gamma, beta, mean, var = inputs
y = chainer.functions.fixed_batch_normalization(
x, gamma, beta, mean=mean, var=var, **self.optional_args)
return y,
@pytest.mark.parametrize(
'x_shape,gamma_shape,beta_shape,mean_shape,var_shape,axis',
_batch_norm_invalid_dimensions_params)
@pytest.mark.parametrize_device(['native:0', 'cuda:0'])
def test_fixed_batch_norm_invalid_dimensions(
device, x_shape, gamma_shape, beta_shape, mean_shape, var_shape, axis,
float_dtype):
x, gamma, beta, mean, var = _create_batch_norm_ndarray_args(
chainerx, device, x_shape, gamma_shape, beta_shape, mean_shape,
var_shape, float_dtype)
with pytest.raises(chainerx.DimensionError):
chainerx.fixed_batch_norm(
x, gamma, beta, mean=mean, var=var, eps=1e-2, axis=axis)
| {
"content_hash": "2a998868b7b990c98606aa005bfb2be9",
"timestamp": "",
"source": "github",
"line_count": 304,
"max_line_length": 79,
"avg_line_length": 38.07236842105263,
"alnum_prop": 0.6100743044755487,
"repo_name": "chainer/chainer",
"id": "67891b36ebd2348fff7bc0d2d8f33d49b1bd7071",
"size": "11574",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "tests/chainerx_tests/unit_tests/routines_tests/test_normalization.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "3805"
},
{
"name": "C",
"bytes": "1099"
},
{
"name": "C++",
"bytes": "1688016"
},
{
"name": "CMake",
"bytes": "51351"
},
{
"name": "Cuda",
"bytes": "191633"
},
{
"name": "Dockerfile",
"bytes": "6102"
},
{
"name": "PowerShell",
"bytes": "7197"
},
{
"name": "Python",
"bytes": "6431941"
},
{
"name": "Shell",
"bytes": "50151"
}
],
"symlink_target": ""
} |
import os
import time
from ds3 import ds3
client = ds3.createClientFromEnv()
bucketName = "books"
# make sure the bucket that we will be sending objects to exists
client.put_bucket(ds3.PutBucketRequest(bucketName))
# create your list of objects that will be sent to DS3
# the key to the dictionary is the name the object will have on the server, and the values are the files to be transferred
# this example assumes that these files exist on the file system
fileListMapping = {
"beowulf.txt":"resources/beowulf.txt",
"sherlock_holmes.txt":"resources/sherlock_holmes.txt",
"tale_of_two_cities.txt":"resources/tale_of_two_cities.txt",
"ulysses.txt":"resources/ulysses.txt",
"folder/beowulf.txt":"resources/beowulf.txt",
"folder/sherlock_holmes.txt":"resources/sherlock_holmes.txt",
"folder/folder2/tale_of_two_cities.txt":"resources/tale_of_two_cities.txt",
"folder/folder2/ulysses.txt":"resources/ulysses.txt"
}
# this method is used to map a file path to a Ds3PutObject
# we need two parameters because the S3 API wants the name that the object will take on the server, but the size obviously needs to come from the file on the current file system
def fileNameToDs3PutObject(fileName, realFileName):
size = os.stat(realFileName).st_size
return ds3.Ds3PutObject(fileName, size)
# get the sizes for each file
fileList = list([fileNameToDs3PutObject(key, fileListMapping[key]) for key in list(fileListMapping.keys())])
# submit the put bulk request to DS3
bulkResult = client.put_bulk_job_spectra_s3(ds3.PutBulkJobSpectraS3Request(bucketName, fileList))
# the bulk request will split the files over several chunks if it needs to.
# we then need to ask what chunks we can send, and then send them making
# sure we don't resend the same chunks
# create a set of the chunk ids which will be used to track
# what chunks have not been sent
chunkIds = set([x['ChunkId'] for x in bulkResult.result['ObjectsList']])
# while we still have chunks to send
while len(chunkIds) > 0:
# get a list of the available chunks that we can send
availableChunks = client.get_job_chunks_ready_for_client_processing_spectra_s3(
ds3.GetJobChunksReadyForClientProcessingSpectraS3Request(bulkResult.result['JobId']))
chunks = availableChunks.result['ObjectsList']
# check to make sure we got some chunks, if we did not
# sleep and retry. This could mean that the cache is full
if len(chunks) == 0:
time.sleep(60)
continue
# for each chunk that is available, check to make sure
# we have not sent it, and if not, send that object
for chunk in chunks:
if not chunk['ChunkId'] in chunkIds:
continue
chunkIds.remove(chunk['ChunkId'])
for obj in chunk['ObjectList']:
            # It is possible that, if we start resending a chunk because the program
            # crashed, some objects will already be in cache. Check that they are not,
            # and then send the object to Spectra S3.
if obj['InCache'] == 'false':
objectDataStream = open(fileListMapping[obj['Name']], "rb")
objectDataStream.seek(int(obj['Offset']), 0)
putObjectResponse = client.put_object(ds3.PutObjectRequest(bucket_name=bucketName,
object_name=obj['Name'],
offset=obj['Offset'],
length=obj['Length'],
stream=objectDataStream,
job=bulkResult.result['JobId']))
# we now verify that all our objects have been sent to DS3
bucketResponse = client.get_bucket(ds3.GetBucketRequest(bucketName))
print("\nFiles in bucket:")
for obj in bucketResponse.result['ContentsList']:
print(obj['Key'])
# objects on the server are arranged in a flat structure, but filepath-like names can be simulated using prefixes.
# delete_folder_recursively_spectra_s3 deletes every object whose name starts with "prefix/",
# in this case first "folder/folder2" and then "folder"
client.delete_folder_recursively_spectra_s3(ds3.DeleteFolderRecursivelySpectraS3Request(bucketName, "folder/folder2"))
print("\nAfter deleting 'folder/folder2':")
bucketResponse = client.get_bucket(ds3.GetBucketRequest(bucketName))
for obj in bucketResponse.result['ContentsList']:
print(obj['Key'])
client.delete_folder_recursively_spectra_s3(ds3.DeleteFolderRecursivelySpectraS3Request(bucketName, "folder"))
print("\nAfter deleting 'folder':")
bucketResponse = client.get_bucket(ds3.GetBucketRequest(bucketName))
for obj in bucketResponse.result['ContentsList']:
print(obj['Key'])
# delete everything else
for obj in bucketResponse.result['ContentsList']:
client.delete_object(ds3.DeleteObjectRequest(bucketName, obj['Key']))
client.delete_bucket(ds3.DeleteBucketRequest(bucketName))
| {
"content_hash": "baf033d742224088b8eae43cfd49038f",
"timestamp": "",
"source": "github",
"line_count": 114,
"max_line_length": 177,
"avg_line_length": 43.421052631578945,
"alnum_prop": 0.6909090909090909,
"repo_name": "SpectraLogic/ds3_python3_sdk",
"id": "191de2c478282e16f7ed378424bb64a9c673cba3",
"size": "5530",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "samples/renaming.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "344"
},
{
"name": "Python",
"bytes": "728198"
},
{
"name": "Ruby",
"bytes": "2968"
},
{
"name": "Shell",
"bytes": "516"
}
],
"symlink_target": ""
} |
from office365.outlook.calendar.schedule_item import ScheduleItem
from office365.outlook.calendar.working_hours import WorkingHours
from office365.runtime.client_value import ClientValue
from office365.runtime.client_value_collection import ClientValueCollection
class ScheduleInformation(ClientValue):
"""Represents the availability of a user, distribution list, or resource (room or equipment)
for a specified time period."""
def __init__(self, scheduleId=None, scheduleItems=None, availabilityView=None, error=None, workingHours=None):
"""
:param WorkingHours workingHours: The days of the week and hours in a specific time zone that the user works.
These are set as part of the user's mailboxSettings.
:param str error: Error information from attempting to get the availability of the user, distribution list,
or resource.
:param str availabilityView: Represents a merged view of availability of all the items in scheduleItems.
The view consists of time slots. Availability during each time slot is indicated with:
0= free, 1= tentative, 2= busy, 3= out of office, 4= working elsewhere.
:param ClientValueCollection(ScheduleItem) scheduleItems: Contains the items that describe the availability
of the user or resource.
:param str scheduleId: An SMTP address of the user, distribution list, or resource, identifying an instance
of scheduleInformation.
"""
super(ScheduleInformation, self).__init__()
self.scheduleItems = ClientValueCollection(ScheduleItem) if scheduleItems is None else scheduleItems
self.scheduleId = scheduleId
self.availabilityView = availabilityView
self.error = error
self.workingHours = WorkingHours() if workingHours is None else workingHours
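def _example_schedule_information():
    # Hypothetical construction sketch; the address and availability string are illustrative.
    info = ScheduleInformation(
        scheduleId='room101@contoso.com',
        availabilityView='022200',  # one digit per time slot: 0=free, 2=busy
    )
    return info.scheduleItems, info.workingHours  # default instances created by __init__ above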
| {
"content_hash": "bf651306c5ef740be6c4ee24a90f6a5c",
"timestamp": "",
"source": "github",
"line_count": 31,
"max_line_length": 117,
"avg_line_length": 60.225806451612904,
"alnum_prop": 0.7305838243170862,
"repo_name": "vgrem/Office365-REST-Python-Client",
"id": "52aac5bde2d94ba1e502964ba3131fe398de0da4",
"size": "1867",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "office365/outlook/calendar/schedule_information.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "1659292"
}
],
"symlink_target": ""
} |
"""
Module that contains many useful utilities
for validating data or function arguments
"""
import warnings
from pandas.core.dtypes.common import is_bool
def _check_arg_length(fname, args, max_fname_arg_count, compat_args):
"""
Checks whether 'args' has length of at most 'compat_args'. Raises
a TypeError if that is not the case, similar to in Python when a
function is called with too many arguments.
"""
if max_fname_arg_count < 0:
raise ValueError("'max_fname_arg_count' must be non-negative")
if len(args) > len(compat_args):
max_arg_count = len(compat_args) + max_fname_arg_count
actual_arg_count = len(args) + max_fname_arg_count
argument = 'argument' if max_arg_count == 1 else 'arguments'
raise TypeError(
"{fname}() takes at most {max_arg} {argument} "
"({given_arg} given)".format(
fname=fname, max_arg=max_arg_count,
argument=argument, given_arg=actual_arg_count))
def _check_for_default_values(fname, arg_val_dict, compat_args):
"""
Check that the keys in `arg_val_dict` are mapped to their
default values as specified in `compat_args`.
Note that this function is to be called only when it has been
checked that arg_val_dict.keys() is a subset of compat_args
"""
for key in arg_val_dict:
# try checking equality directly with '=' operator,
# as comparison may have been overridden for the left
# hand object
try:
v1 = arg_val_dict[key]
v2 = compat_args[key]
# check for None-ness otherwise we could end up
# comparing a numpy array vs None
if (v1 is not None and v2 is None) or \
(v1 is None and v2 is not None):
match = False
else:
match = (v1 == v2)
if not is_bool(match):
raise ValueError("'match' is not a boolean")
# could not compare them directly, so try comparison
# using the 'is' operator
        except Exception:
match = (arg_val_dict[key] is compat_args[key])
if not match:
raise ValueError(("the '{arg}' parameter is not "
"supported in the pandas "
"implementation of {fname}()".
format(fname=fname, arg=key)))
def validate_args(fname, args, max_fname_arg_count, compat_args):
"""
Checks whether the length of the `*args` argument passed into a function
has at most `len(compat_args)` arguments and whether or not all of these
elements in `args` are set to their default values.
    Parameters
    ----------
    fname: str
The name of the function being passed the `*args` parameter
args: tuple
The `*args` parameter passed into a function
max_fname_arg_count: int
The maximum number of arguments that the function `fname`
can accept, excluding those in `args`. Used for displaying
appropriate error messages. Must be non-negative.
compat_args: OrderedDict
        An ordered dictionary of keys and their associated default values.
In order to accommodate buggy behaviour in some versions of `numpy`,
where a signature displayed keyword arguments but then passed those
arguments **positionally** internally when calling downstream
implementations, an ordered dictionary ensures that the original
order of the keyword arguments is enforced. Note that if there is
only one key, a generic dict can be passed in as well.
Raises
------
TypeError if `args` contains more values than there are `compat_args`
ValueError if `args` contains values that do not correspond to those
of the default values specified in `compat_args`
"""
_check_arg_length(fname, args, max_fname_arg_count, compat_args)
# We do this so that we can provide a more informative
# error message about the parameters that we are not
# supporting in the pandas implementation of 'fname'
kwargs = dict(zip(compat_args, args))
_check_for_default_values(fname, kwargs, compat_args)
def _check_for_invalid_keys(fname, kwargs, compat_args):
"""
Checks whether 'kwargs' contains any keys that are not
in 'compat_args' and raises a TypeError if there is one.
"""
# set(dict) --> set of the dictionary's keys
diff = set(kwargs) - set(compat_args)
if diff:
bad_arg = list(diff)[0]
raise TypeError(("{fname}() got an unexpected "
"keyword argument '{arg}'".
format(fname=fname, arg=bad_arg)))
def validate_kwargs(fname, kwargs, compat_args):
"""
Checks whether parameters passed to the **kwargs argument in a
function `fname` are valid parameters as specified in `*compat_args`
and whether or not they are set to their default values.
Parameters
----------
fname: str
The name of the function being passed the `**kwargs` parameter
kwargs: dict
The `**kwargs` parameter passed into `fname`
compat_args: dict
A dictionary of keys that `kwargs` is allowed to have and their
associated default values
Raises
------
TypeError if `kwargs` contains keys not in `compat_args`
ValueError if `kwargs` contains keys in `compat_args` that do not
map to the default values specified in `compat_args`
"""
kwds = kwargs.copy()
_check_for_invalid_keys(fname, kwargs, compat_args)
_check_for_default_values(fname, kwds, compat_args)
def validate_args_and_kwargs(fname, args, kwargs,
max_fname_arg_count,
compat_args):
"""
Checks whether parameters passed to the *args and **kwargs argument in a
function `fname` are valid parameters as specified in `*compat_args`
and whether or not they are set to their default values.
Parameters
----------
fname: str
The name of the function being passed the `**kwargs` parameter
args: tuple
The `*args` parameter passed into a function
kwargs: dict
The `**kwargs` parameter passed into `fname`
max_fname_arg_count: int
        The maximum number of arguments that the function `fname`
        can accept, excluding those in `args`. Used for displaying
        appropriate error messages. Must be non-negative.
compat_args: OrderedDict
        An ordered dictionary of keys that `kwargs` is allowed to
have and their associated default values. Note that if there
is only one key, a generic dict can be passed in as well.
Raises
------
TypeError if `args` contains more values than there are
`compat_args` OR `kwargs` contains keys not in `compat_args`
    ValueError if `args` contains values not at the default value (`None`)
    OR `kwargs` contains keys in `compat_args` that do not map to the default
    value as specified in `compat_args`
See Also
--------
validate_args : purely args validation
validate_kwargs : purely kwargs validation
"""
# Check that the total number of arguments passed in (i.e.
# args and kwargs) does not exceed the length of compat_args
_check_arg_length(fname, args + tuple(kwargs.values()),
max_fname_arg_count, compat_args)
# Check there is no overlap with the positional and keyword
# arguments, similar to what is done in actual Python functions
args_dict = dict(zip(compat_args, args))
for key in args_dict:
if key in kwargs:
raise TypeError("{fname}() got multiple values for keyword "
"argument '{arg}'".format(fname=fname, arg=key))
kwargs.update(args_dict)
validate_kwargs(fname, kwargs, compat_args)
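def _example_validate_args_and_kwargs():
    # Hypothetical illustration (not part of pandas): 'compat_args' carries the defaults,
    # and only default values may be passed through *args/**kwargs.
    from collections import OrderedDict
    compat_args = OrderedDict([('axis', None), ('out', None)])
    validate_args_and_kwargs('my_sum', (None,), {'out': None}, 1, compat_args)  # accepted
    try:
        validate_args_and_kwargs('my_sum', (1,), {}, 1, compat_args)  # non-default value
    except ValueError:
        pass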
def validate_bool_kwarg(value, arg_name):
""" Ensures that argument passed in arg_name is of type bool. """
if not (is_bool(value) or value is None):
raise ValueError('For argument "{arg}" expected type bool, received '
'type {typ}.'.format(arg=arg_name,
typ=type(value).__name__))
return value
def validate_axis_style_args(data, args, kwargs, arg_name, method_name):
"""Argument handler for mixed index, columns / axis functions
In an attempt to handle both `.method(index, columns)`, and
`.method(arg, axis=.)`, we have to do some bad things to argument
parsing. This translates all arguments to `{index=., columns=.}` style.
Parameters
----------
data : DataFrame or Panel
    args : tuple
All positional arguments from the user
kwargs : dict
All keyword arguments from the user
arg_name, method_name : str
Used for better error messages
Returns
-------
kwargs : dict
A dictionary of keyword arguments. Doesn't modify ``kwargs``
inplace, so update them with the return value here.
Examples
--------
>>> df._validate_axis_style_args((str.upper,), {'columns': id},
... 'mapper', 'rename')
{'columns': <function id>, 'index': <method 'upper' of 'str' objects>}
This emits a warning
>>> df._validate_axis_style_args((str.upper, id), {},
... 'mapper', 'rename')
{'columns': <function id>, 'index': <method 'upper' of 'str' objects>}
"""
# TODO(PY3): Change to keyword-only args and remove all this
out = {}
# Goal: fill 'out' with index/columns-style arguments
# like out = {'index': foo, 'columns': bar}
# Start by validating for consistency
if 'axis' in kwargs and any(x in kwargs for x in data._AXIS_NUMBERS):
msg = "Cannot specify both 'axis' and any of 'index' or 'columns'."
raise TypeError(msg)
# First fill with explicit values provided by the user...
if arg_name in kwargs:
if args:
msg = ("{} got multiple values for argument "
"'{}'".format(method_name, arg_name))
raise TypeError(msg)
axis = data._get_axis_name(kwargs.get('axis', 0))
out[axis] = kwargs[arg_name]
# More user-provided arguments, now from kwargs
for k, v in kwargs.items():
try:
ax = data._get_axis_name(k)
except ValueError:
pass
else:
out[ax] = v
# All user-provided kwargs have been handled now.
# Now we supplement with positional arguments, emitting warnings
# when there's ambiguity and raising when there's conflicts
if len(args) == 0:
pass # It's up to the function to decide if this is valid
elif len(args) == 1:
axis = data._get_axis_name(kwargs.get('axis', 0))
out[axis] = args[0]
elif len(args) == 2:
if 'axis' in kwargs:
# Unambiguously wrong
msg = ("Cannot specify both 'axis' and any of 'index' "
"or 'columns'")
raise TypeError(msg)
msg = ("Interpreting call\n\t'.{method_name}(a, b)' as "
"\n\t'.{method_name}(index=a, columns=b)'.\nUse named "
"arguments to remove any ambiguity. In the future, using "
"positional arguments for 'index' or 'columns' will raise "
" a 'TypeError'.")
warnings.warn(msg.format(method_name=method_name,), FutureWarning,
stacklevel=4)
out[data._AXIS_NAMES[0]] = args[0]
out[data._AXIS_NAMES[1]] = args[1]
else:
msg = "Cannot specify all of '{}', 'index', 'columns'."
raise TypeError(msg.format(arg_name))
return out
def validate_fillna_kwargs(value, method, validate_scalar_dict_value=True):
"""Validate the keyword arguments to 'fillna'.
This checks that exactly one of 'value' and 'method' is specified.
If 'method' is specified, this validates that it's a valid method.
Parameters
----------
value, method : object
The 'value' and 'method' keyword arguments for 'fillna'.
validate_scalar_dict_value : bool, default True
Whether to validate that 'value' is a scalar or dict. Specifically,
validate that it is not a list or tuple.
Returns
-------
value, method : object
"""
from pandas.core.missing import clean_fill_method
if value is None and method is None:
raise ValueError("Must specify a fill 'value' or 'method'.")
elif value is None and method is not None:
method = clean_fill_method(method)
elif value is not None and method is None:
if validate_scalar_dict_value and isinstance(value, (list, tuple)):
raise TypeError('"value" parameter must be a scalar or dict, but '
'you passed a "{0}"'.format(type(value).__name__))
elif value is not None and method is not None:
raise ValueError("Cannot specify both 'value' and 'method'.")
return value, method
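def _example_validate_fillna_kwargs():
    # Hypothetical illustration (not part of pandas): exactly one of 'value'/'method' is allowed.
    assert validate_fillna_kwargs(0, None) == (0, None)
    try:
        validate_fillna_kwargs(0, 'ffill')   # both supplied
    except ValueError:
        pass
    try:
        validate_fillna_kwargs(None, None)   # neither supplied
    except ValueError:
        pass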
| {
"content_hash": "bac108e42386386489838e949fb5c14c",
"timestamp": "",
"source": "github",
"line_count": 358,
"max_line_length": 78,
"avg_line_length": 36.427374301675975,
"alnum_prop": 0.617437313089487,
"repo_name": "kdebrab/pandas",
"id": "a96563051e7de87c66320535c9aec205ee0c3ded",
"size": "13041",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "pandas/util/_validators.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "3847"
},
{
"name": "C",
"bytes": "431689"
},
{
"name": "C++",
"bytes": "17193"
},
{
"name": "HTML",
"bytes": "551714"
},
{
"name": "Makefile",
"bytes": "563"
},
{
"name": "PowerShell",
"bytes": "2970"
},
{
"name": "Python",
"bytes": "13653350"
},
{
"name": "Shell",
"bytes": "25368"
},
{
"name": "Smarty",
"bytes": "2045"
}
],
"symlink_target": ""
} |
from .ssl_ import create_urllib3_context, resolve_cert_reqs, resolve_ssl_version
def connection_requires_http_tunnel(
proxy_url=None, proxy_config=None, destination_scheme=None
):
"""
Returns True if the connection requires an HTTP CONNECT through the proxy.
:param URL proxy_url:
URL of the proxy.
:param ProxyConfig proxy_config:
Proxy configuration from poolmanager.py
:param str destination_scheme:
The scheme of the destination. (i.e https, http, etc)
"""
# If we're not using a proxy, no way to use a tunnel.
if proxy_url is None:
return False
# HTTP destinations never require tunneling, we always forward.
if destination_scheme == "http":
return False
# Support for forwarding with HTTPS proxies and HTTPS destinations.
if (
proxy_url.scheme == "https"
and proxy_config
and proxy_config.use_forwarding_for_https
):
return False
# Otherwise always use a tunnel.
return True
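def _example_requires_tunnel():
    # Hypothetical illustration (not part of urllib3): an HTTP proxy in front of an HTTPS
    # destination needs a CONNECT tunnel, while plain-HTTP destinations are simply forwarded.
    from .url import parse_url
    assert connection_requires_http_tunnel(parse_url('http://proxy:3128'), None, 'https')
    assert not connection_requires_http_tunnel(parse_url('http://proxy:3128'), None, 'http')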
def create_proxy_ssl_context(
ssl_version, cert_reqs, ca_certs=None, ca_cert_dir=None, ca_cert_data=None
):
"""
Generates a default proxy ssl context if one hasn't been provided by the
user.
"""
ssl_context = create_urllib3_context(
ssl_version=resolve_ssl_version(ssl_version),
cert_reqs=resolve_cert_reqs(cert_reqs),
)
if (
not ca_certs
and not ca_cert_dir
and not ca_cert_data
and hasattr(ssl_context, "load_default_certs")
):
ssl_context.load_default_certs()
return ssl_context
| {
"content_hash": "6823df66ec0cb4e27629cfa1cde0ebdc",
"timestamp": "",
"source": "github",
"line_count": 57,
"max_line_length": 80,
"avg_line_length": 28.157894736842106,
"alnum_prop": 0.6510903426791277,
"repo_name": "sbidoul/pip",
"id": "2199cc7b7f004009493d032720c36d6568f9d89e",
"size": "1605",
"binary": false,
"copies": "23",
"ref": "refs/heads/main",
"path": "src/pip/_vendor/urllib3/util/proxy.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "3137"
},
{
"name": "PowerShell",
"bytes": "2137"
},
{
"name": "Python",
"bytes": "7107244"
}
],
"symlink_target": ""
} |
from os import path
try:
import json # try stdlib (Python 2.6)
except ImportError:
try:
import simplejson as json # try external module
except:
import gluon.contrib.simplejson as json # fallback to pure-Python module
from gluon import current
from gluon.html import *
from gluon.storage import Storage
from gluon.http import redirect
from s3 import FS, S3CustomController
from s3theme import formstyle_foundation_inline
THEME = "DRK"
# =============================================================================
class index(S3CustomController):
""" Custom Home Page """
def __call__(self):
output = {}
T = current.T
request = current.request
response = current.response
s3 = response.s3
# Check logged in and permissions
auth = current.auth
settings = current.deployment_settings
roles = current.session.s3.roles
system_roles = auth.get_system_roles()
AUTHENTICATED = system_roles.AUTHENTICATED
# Login/Registration forms
self_registration = current.deployment_settings.get_security_registration_visible()
registered = False
login_form = None
login_div = None
register_form = None
register_div = None
# Contact Form
request_email = settings.get_frontpage("request_email")
if request_email:
from s3dal import Field
from gluon.validators import IS_NOT_EMPTY
from gluon.sqlhtml import SQLFORM
fields = [Field("name",
label="Your name",
requires=IS_NOT_EMPTY(),
),
Field("address",
label="Your e-mail address",
requires=IS_NOT_EMPTY(),
),
Field("subject",
label="Subject",
requires=IS_NOT_EMPTY(),
),
Field("message", "text",
label="Message",
requires=IS_NOT_EMPTY(),
),
]
from s3 import s3_mark_required
labels, required = s3_mark_required(fields)
s3.has_required = required
response.form_label_separator = ""
contact_form = SQLFORM.factory(formstyle = settings.get_ui_formstyle(),
submit_button = T("Submit"),
labels = labels,
separator = "",
table_name = "contact", # Dummy table name
_id="mailform",
*fields
)
if contact_form.accepts(request.post_vars,
current.session,
formname="contact_form",
keepvalues=False,
hideerror=False):
                # Process the contact form
form_vars = contact_form.vars
sender = "%s <%s>" % (form_vars.name, form_vars.address)
result = current.msg.send_email(to=request_email,
sender=sender,
subject=form_vars.subject,
message=form_vars.message,
reply_to=form_vars.address,
)
if result:
response.confirmation = "Thank you for your message - we'll be in touch shortly"
if s3.cdn:
if s3.debug:
s3.scripts.append("http://ajax.aspnetcdn.com/ajax/jquery.validate/1.9/jquery.validate.js")
else:
s3.scripts.append("http://ajax.aspnetcdn.com/ajax/jquery.validate/1.9/jquery.validate.min.js")
else:
if s3.debug:
s3.scripts.append("/%s/static/scripts/jquery.validate.js" % request.application)
else:
s3.scripts.append("/%s/static/scripts/jquery.validate.min.js" % request.application)
validation_script = '''
$('#mailform').validate({
errorClass:'req',
rules:{
name:{
required:true
},
address: {
required:true,
email:true
},
subject:{
required:true
},
message:{
required:true
}
},
messages:{
name:"Enter your name",
subject:"Enter a subject",
message:"Enter a message",
address:{
required:"Please enter a valid email address",
email:"Please enter a valid email address"
}
},
errorPlacement:function(error,element){
error.appendTo(element.parents('div.controls'))
},
submitHandler:function(form){
form.submit()
}
})'''
s3.jquery_ready.append(validation_script)
else:
contact_form = ""
if AUTHENTICATED not in roles:
login_buttons = DIV(A(T("Login"),
_id="show-login",
_class="tiny secondary button"),
_id="login-buttons"
)
script = '''
$('#show-mailform').click(function(e){
e.preventDefault()
$('#intro').slideDown(400, function() {
$('#login_box').hide()
});
})
$('#show-login').click(function(e){
e.preventDefault()
$('#login_form').show()
$('#register_form').hide()
$('#login_box').show()
$('#intro').slideUp()
})'''
s3.jquery_ready.append(script)
# This user isn't yet logged-in
            if "registered" in request.cookies:
# This browser has logged-in before
registered = True
if self_registration is True:
# Provide a Registration box on front page
login_buttons.append(A(T("Register"),
_id="show-register",
_class="tiny secondary button",
_style="margin-left:5px"))
script = '''
$('#show-register').click(function(e){
e.preventDefault()
$('#login_form').hide()
$('#register_form').show()
$('#login_box').show()
$('#intro').slideUp()
})'''
s3.jquery_ready.append(script)
register_form = auth.register()
register_div = DIV(H3(T("Register")),
P(XML(T("If you would like to help, then please %(sign_up_now)s") % \
dict(sign_up_now=B(T("sign-up now"))))))
register_script = '''
$('#register-btn').click(function(e){
e.preventDefault()
$('#register_form').show()
$('#login_form').hide()
})
$('#login-btn').click(function(e){
e.preventDefault()
$('#register_form').hide()
$('#login_form').show()
})'''
s3.jquery_ready.append(register_script)
# Provide a login box on front page
auth.messages.submit_button = T("Login")
login_form = auth.login(inline=True)
login_div = DIV(H3(T("Login")),
P(XML(T("Registered users can %(login)s to access the system") % \
dict(login=B(T("login"))))))
else:
login_buttons = ""
output["login_buttons"] = login_buttons
output["self_registration"] = self_registration
output["registered"] = registered
output["login_div"] = login_div
output["login_form"] = login_form
output["register_div"] = register_div
output["register_form"] = register_form
output["contact_form"] = contact_form
# Slick slider
if s3.debug:
s3.scripts.append("/%s/static/scripts/slick.js" % request.application)
else:
s3.scripts.append("/%s/static/scripts/slick.min.js" % request.application)
script = '''
$(document).ready(function(){
$('#title-image').slick({
autoplay:true,
autoplaySpeed:5000,
speed:1000,
fade:true,
cssEase:'linear'
});
});'''
s3.jquery_ready.append(script)
s3.stylesheets.append("../themes/%s/homepage.css" % THEME)
self._view(THEME, "index.html")
return output
# END =========================================================================
| {
"content_hash": "e86f918328eda5e7585953f4177d1937",
"timestamp": "",
"source": "github",
"line_count": 254,
"max_line_length": 114,
"avg_line_length": 34.188976377952756,
"alnum_prop": 0.4841087056655919,
"repo_name": "sahana/Turkey",
"id": "51b3d6abc8b6ad4b239d60a0d2499d5d242dc002",
"size": "8709",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "modules/templates/DRK/controllers.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "727"
},
{
"name": "CSS",
"bytes": "3336714"
},
{
"name": "HTML",
"bytes": "1369269"
},
{
"name": "JavaScript",
"bytes": "20093511"
},
{
"name": "NSIS",
"bytes": "3934"
},
{
"name": "PHP",
"bytes": "15220"
},
{
"name": "Python",
"bytes": "31303565"
},
{
"name": "Ruby",
"bytes": "8291"
},
{
"name": "Shell",
"bytes": "5059"
},
{
"name": "XSLT",
"bytes": "3208049"
}
],
"symlink_target": ""
} |
import copy
import matplotlib.pyplot as plt
import numpy as np
import scipy as sp
import scipy.signal  # sp.signal.resample is used below and needs an explicit import
from scipy.spatial.distance import pdist, squareform
from yass.evaluate.util import *
def align_template(template, temp_len=40, mode='all'):
    """Aligns each channel of a template to its main channel and trims it to temp_len samples."""
    window = np.arange(0, temp_len) - temp_len // 2
n_chan = template.shape[1]
main_chan = main_channels(template)[-1]
base_trace = np.zeros(template.shape[0])
base_trace[:] = template[:, main_chan]
temp_norm = np.sum(template * template, axis=0)
base_norm = temp_norm[main_chan]
aligned_temp = np.zeros([temp_len, n_chan])
if mode == 'neg':
base_trace[base_trace > 0] = 0
for c in range(n_chan):
orig_filt = template[:, c]
filt = np.zeros(orig_filt.shape)
filt[:] = orig_filt
if mode == 'neg':
filt[filt > 0] = 0
filt_norm = temp_norm[c]
conv_dist = -2 * np.convolve(filt, np.flip(base_trace, axis=0), mode='same') + base_norm + filt_norm
center = np.argmin(conv_dist)
try:
aligned_temp[:, c] = orig_filt[center + window]
except:
aligned_temp[:, c] = orig_filt[np.arange(0, temp_len)]
return aligned_temp
def recon(template, rank=3):
"""SVD reconstruction of a template."""
u, s, vh = np.linalg.svd(template)
return np.matmul(u[:, :rank] * s[:rank], vh[:rank, :])
def recon_error(template, rank=3):
"""Reconstruction error of SVD with given rank."""
temp_rec = recon(template, rank=rank)
return np.linalg.norm((template - temp_rec))
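# Minimal sketch, not part of the original file: the SVD reconstruction error
# is non-increasing in the rank and drops to ~0 once the rank reaches
# min(n_times, n_chan). The random 40x7 template below is an assumption made
# up for the example.
def _example_recon_error():
    rng = np.random.RandomState(0)
    template = rng.normal(size=(40, 7))
    errors = [recon_error(template, rank=r) for r in (1, 3, 7)]
    # errors[0] >= errors[1] >= errors[2], with errors[2] close to zero.
    return errors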
class Geometry(object):
"""Geometry Object for finidng closest channels."""
def __init__(self, geometry):
self.geom = geometry
self.pdist = squareform(pdist(geometry))
def neighbors(self, channel, size):
return np.argsort(self.pdist[channel, :])[:size]
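# Illustrative sketch, not part of the original file: Geometry.neighbors
# returns the indices of the `size` channels closest to the given channel,
# with the channel itself in first position. The 4-channel probe layout below
# is an assumption made up for the example.
def _example_geometry_neighbors():
    probe = np.array([[0., 0.], [0., 20.], [0., 40.], [30., 0.]])
    geom = Geometry(probe)
    # Distances from channel 0: 0, 20, 40, 30 -> nearest three are 0, 1, 3.
    return geom.neighbors(channel=0, size=3)  # array([0, 1, 3])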
def vis_chan(template, min_peak_to_peak=1):
"""Visible channels on a standardized template with given threshold."""
return np.max(template, axis=0) - np.min(template, axis=0) > min_peak_to_peak
def conv_dist(ref, temp):
"""l2 distance of temp with all windows of ref."""
return np.convolve((ref * ref), np.ones(len(temp)), mode='valid') - 2 * np.convolve(ref, np.flip(temp, axis=0), mode='valid') + np.sum(temp * temp)
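# Worked example, not part of the original file: conv_dist(ref, temp) is the
# squared l2 distance between temp and every same-length window of ref, i.e.
# ||ref[i:i + len(temp)] - temp||^2 for each valid offset i.
def _example_conv_dist():
    ref = np.array([0., 1., 2., 3.])
    temp = np.array([1., 2.])
    # Windows of ref: [0,1], [1,2], [2,3]; squared distances: 2, 0, 2.
    return conv_dist(ref, temp)  # array([2., 0., 2.])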
def align_temp_to_temp(ref, temp):
"""Aligns temp with bigger window to ref with smaller window."""
n_chan = ref.shape[1]
shifts = np.zeros(n_chan)
for c in range(n_chan):
shifts[c] = np.argmin(conv_dist(temp[:, c], ref[:, c]))
#plt.plot(conv_dist(temp[:, c], ref[:, c]))
return shifts
def optimal_aligned_compress(template, upsample=5, rank=3, max_shift=6):
"""Greedy local search of alignments for best SVD compression error."""
max_shift = max_shift * upsample
half_max_shift = max_shift // 2
n_chan = template.shape[1]
n_times = template.shape[0]
template = sp.signal.resample(template, n_times * upsample)
new_times = upsample * n_times
snip_win = (half_max_shift, -half_max_shift)
snip_temp = copy.copy(template[snip_win[0]:snip_win[1], :])
shifts = np.zeros(n_chan, dtype='int')
#
obj = recon_error(snip_temp, rank=rank)
obj_list = []
for i, k in enumerate(reversed(main_channels(template))):
if i == 0:
            # main channel: nothing to shift
continue
#cand_chan = np.random.randint(0, n_chan)
cand_chan = k
# obj of jitter -1, 0, 0 respectively
new_obj = np.zeros(max_shift + 1)
for j, jitter in enumerate(range(-half_max_shift, half_max_shift + 1)):
snip_from, snip_to = snip_win[0] + jitter, snip_win[1] + jitter
if snip_to == 0:
snip_to = new_times
snip_temp[:, cand_chan] = template[snip_from:snip_to, cand_chan]
new_obj[j] = recon_error(snip_temp, rank=rank)
#plt.plot(np.arange(- max_shift, max_shift + 1, 1), new_obj)
        # Optimal local jitter
opt_shift = np.argmin(new_obj) - half_max_shift
shifts[cand_chan] = opt_shift
snip_from, snip_to = snip_win[0] + opt_shift, snip_win[1] + opt_shift
if snip_to == 0:
snip_to = new_times
snip_temp[:, cand_chan] = template[snip_from:snip_to, cand_chan]
obj = min(new_obj)
obj_list.append(obj)
return snip_temp, obj_list
def optimal_svd_align(template, geometry, rank=3, upsample=5, chunk=7, max_shift=10):
"""Iterative svd then align approach to alignment."""
max_shift = upsample * max_shift
n_times = template.shape[0]
n_chan = template.shape[1]
main_chan = np.flip(main_channels(template), axis=0)
win_len = n_times * upsample - max_shift
# Upsample
temp = sp.signal.resample(template, n_times * upsample)
shifts = np.zeros(n_chan, dtype=int) + max_shift // 2
#
chunk_set = 0
i = 1
terminate = False
while not terminate:
if i * chunk > n_chan:
cum_chan = main_chan
terminate = True
else:
#cum_chan = main_chan[:i * chunk]
cum_chan = geometry.neighbors(main_chan[0], size=chunk * i)
for iteration in range(4):
temp_ref = []
for c in cum_chan:
temp_ref.append(temp[shifts[c]:shifts[c] + win_len, c])
temp_ref = np.array(temp_ref).T
temp_ref_rec = recon(temp_ref, rank=rank)
shifts[cum_chan] = align_temp_to_temp(temp_ref_rec, temp[:, cum_chan])
i += 1
aligned_temp = []
for c in range(n_chan):
aligned_temp.append(temp[shifts[c]:shifts[c] + win_len, c])
return np.array(aligned_temp).T
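# Usage sketch, not part of the original file: align a random template with
# optimal_svd_align. The 60-sample, 4-channel template and the linear probe
# geometry below are assumptions made up for the demo.
def _example_optimal_svd_align():
    rng = np.random.RandomState(0)
    template = rng.normal(size=(60, 4))
    geom = Geometry(np.array([[0., 0.], [0., 20.], [0., 40.], [0., 60.]]))
    aligned = optimal_svd_align(template, geom, rank=2, upsample=5,
                                chunk=2, max_shift=4)
    # Output is upsampled and trimmed: 60 * 5 - 4 * 5 = 280 samples, 4 channels.
    return aligned.shape  # (280, 4)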
def plot_spatial(geom, temp, ax, color='C0', alpha=0.7, scale=10., squeeze=8.):
"""Plots template spatially."""
leng = temp.shape[0]
for c in range(temp.shape[1]):
ax.plot(
np.arange(0, leng, 1) / squeeze + geom[c, 0],
temp[:, c] * scale + geom[c, 1], alpha=alpha, color=color, lw=2)
def plot_spatial_fill(geom, temp, ax, color='C0', scale=10., squeeze=8.):
"""Plots standard error for each channel spatially."""
temp_ = temp * 0
leng = temp.shape[0]
for c in range(temp.shape[1]):
ax.fill_between(
np.arange(0, leng, 1) / squeeze + geom[c, 0],
temp_[:, c] - scale + geom[c, 1],
temp_[:, c] + scale + geom[c, 1], color=color, alpha=0.2)
def plot_chan_numbers(geom, ax, offset=10):
"""Plots template spatially.77"""
for c in range(geom.shape[0]):
plt.text(geom[c, 0] + offset, geom[c, 1], str(c), size='large')
def fake_data(spt, temps, length, noise=True):
"""Given a spike train and templates creates a fake data."""
n_time, n_chan, n_unit = temps.shape
data = None
if noise:
data = np.random.normal(0, 1, [length, n_chan])
else:
data = np.zeros([length, n_chan])
for u in range(n_unit):
spt_u = spt[spt[:, 1] == u, 0]
spt_u = spt_u[spt_u < length - n_time]
idx = spt_u + np.arange(0, n_time)[:, np.newaxis]
data[idx, :] += temps[:, :, u][:, np.newaxis, :]
return data
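# Usage sketch, not part of the original file: synthesize a short noisy
# recording from two templates and a spike train. The random templates and
# spike times below are assumptions made up for the demo.
def _example_fake_data():
    rng = np.random.RandomState(0)
    temps = rng.normal(size=(30, 4, 2))             # (n_time, n_chan, n_unit)
    spt = np.array([[100, 0], [400, 1], [900, 0]])  # columns: spike time, unit id
    data = fake_data(spt, temps, length=1000, noise=True)
    return data.shape  # (1000, 4)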
def count_matches(array1, array2, admissible_proximity=40):
"""Finds the matches between two count process.
Returns
-------
tuple of lists
(M, U, M) where M is the list of indices of array2 where
matched with array 1 happened and U contains a list of
indices of array2 where no match with array1 happened.
"""
# In time samples
m, n = len(array1), len(array2)
i, j = 0, 0
count = 0
matched_idx = []
unmatched_idx = []
while i < m and j < n:
if abs(array1[i] - array2[j]) < admissible_proximity:
matched_idx.append(j)
i += 1
j += 1
count += 1
elif array1[i] < array2[j]:
i += 1
else:
unmatched_idx.append(j)
j += 1
return matched_idx, unmatched_idx
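# Worked example, not part of the original file: match detected spike times
# against ground-truth times within the default 40-sample window. The times
# below are made up for the demo.
def _example_count_matches():
    ground_truth = [100, 500, 900]
    detected = [110, 300, 505, 1500]
    matched_idx, unmatched_idx = count_matches(ground_truth, detected)
    # detected[0] matches 100, detected[2] matches 500; detected[1] is unmatched.
    return matched_idx, unmatched_idx  # ([0, 2], [1])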
def compute_snr(temps):
"""Computes peak to peak SNR for given templates."""
chan_peaks = np.max(temps, axis=0)
chan_lows = np.min(temps, axis=0)
peak_to_peak = chan_peaks - chan_lows
return np.max(peak_to_peak, axis=0)
def enforce_refractory_period(spike_train, refractory_period):
"""Removes spike times that violate refractory period.
Parameters:
-----------
spike_train: numpy.ndarray
Shape (N, 2) where first column indicates spike times
and second column unit identities. Should be sorted
by times across all units.
refractory_period: int
Returns:
--------
np.ndarray of shape shape (N, 2).
"""
    # Unit ids are zero-based, so add 1 to obtain the number of units
    n_unit = np.max(spike_train[:, 1]) + 1
delete_idx = []
for u in range(n_unit):
sp_idx = np.where(spike_train[:, 1] == u)[0]
sp = spike_train[sp_idx, 0]
diffs = np.diff(sp)
idx = diffs < refractory_period
while np.sum(idx) > 0:
# Remove violating spike times
delete_idx += list(sp_idx[np.where(idx)[0] + 1])
sp_idx = np.delete(sp_idx, np.where(idx)[0] + 1, axis=0)
# Recompute
sp = spike_train[sp_idx, 0]
diffs = np.diff(sp)
idx = diffs < refractory_period
# Remove all the spike times from the original spike train
    return np.delete(spike_train, delete_idx, axis=0)
| {
"content_hash": "f226978def671330984484e90ecbf2b9",
"timestamp": "",
"source": "github",
"line_count": 276,
"max_line_length": 151,
"avg_line_length": 34.13768115942029,
"alnum_prop": 0.5812990872426237,
"repo_name": "paninski-lab/yass",
"id": "7541c0b7525f175d144d71ccaa470b7db244a4af",
"size": "9422",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/yass/deconvolve/deconv_exp_utils.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "8874"
},
{
"name": "C++",
"bytes": "26804"
},
{
"name": "Cuda",
"bytes": "33184"
},
{
"name": "Makefile",
"bytes": "3129"
},
{
"name": "Python",
"bytes": "1658933"
},
{
"name": "Shell",
"bytes": "1770"
}
],
"symlink_target": ""
} |
import codecs
# Use regular expressions
import re
# Nice command line argument parsing
import argparse
# Module to check the properties of wave files
import wave
# Modules to check files and paths
import os.path
import sys
# Module for working with Toolbox files
# Create an command-line argument parser
parser = argparse.ArgumentParser(description="Convert the transcription in a BAS Partitur file with a MAU tier to the Toolbox format.")
# Add arguments with sensible defaults to parser
parser.add_argument("inputfilename", help="the name of the input BAS Partitur file with MAU tier")
parser.add_argument("originalfilename", help="the name of the original BAS Partitur file")
parser.add_argument("outputfilename", help="the name of the output Toolbox file")
parser.add_argument("-toolboxfile", "--toolboxfile", required=False, default=None, help="the name of a Toolbox file to which the time information should be added (defaults to None)")
parser.add_argument("-toolboxtype", "--toolboxtype", required=False, default="Text", help="Toolbox database type to be used when creating a new Toolbox file from scratch (defaults to Text)")
parser.add_argument("-inputenc", "--inputenc", required=False, default="utf-8", help="the input character encoding to be used for the BAS Partitur file with MAU tier (defaults to UTF-8)")
parser.add_argument("-origenc", "--origenc", required=False, default="utf-8", help="the input character encoding to be used for the original BAS Partitur file (defaults to UTF-8)")
parser.add_argument("-toolboxenc", "--toolboxenc", required=False, default="utf-8", help="the character encoding to be used for the original Toolbox file (defaults to UTF-8)")
parser.add_argument("-outputenc", "--outputenc", required=False, default="utf-8", help="the output character encoding to be used (defaults to UTF-8)")
parser.add_argument("-wave", "--wave", required=False, help="the file name of the associated wave file")
parser.add_argument("-samplerate", "--samplerate", required=False, type=int, help="the sample rate of the associated wave file in Hz")
parser.add_argument("-debuglevel", "--debuglevel", required=False, default=1, type=int, choices=[0,1], help="the debug level to be used (0 --> no status messages, 1 --> print status messages)")
parser.add_argument("-outputwordtimes", "--outputwordtimes", required=False, action="store_true", help="output word start and end times into the Toolbox file (otherwise they are omitted)")
parser.add_argument("-keeputterancetimes", "--keeputterancetimes", required=False, action="store_true", help="keep the original utterance start and end times from the Toolbox file (otherwise they are overwritten)")
parser.add_argument("-wordstarttier", "--wordstarttier", required=False, default="WordBegin", help="the name of the tier to store the start times of words (defaults to WordBegin)")
parser.add_argument("-wordendtier", "--wordendtier", required=False, default="WordEnd", help="the name of the tier to store the end times of words (defaults to WordEnd)")
parser.add_argument("-reftier", "--reftier", required=False, default="ref", help="the name of the reference tier (under which utterance start and end times will be added) (defaults to ref)")
parser.add_argument("-texttier", "--texttier", required=False, default="t", help="the name of the tier to write the words to when creating a new Toolbox file from scratch (defaults to t)")
parser.add_argument("-utterancestarttier", "--utterancestarttier", required=False, default="ELANBegin", help="the name of the tier to store the start times of utterances (defaults to ELANBegin)")
parser.add_argument("-utteranceendtier", "--utteranceendtier", required=False, default="ELANEnd", help="the name of the tier to store the end times of utterances (defaults to ELANEnd)")
# Parse command-line arguments
args = vars(parser.parse_args())
# Process obligatory command-line arguments
input_file_name = args["inputfilename"]
original_file_name = args["originalfilename"]
output_file_name = args["outputfilename"]
# Process optional command-line arguments
original_toolbox_file_name = args["toolboxfile"]
toolbox_type = args["toolboxtype"]
input_encoding = args["inputenc"]
original_encoding = args["origenc"]
toolbox_encoding = args["toolboxenc"]
output_encoding = args["outputenc"]
sample_rate = args["samplerate"]
debug_level = args["debuglevel"]
word_start_tier_name = args["wordstarttier"]
word_end_tier_name = args["wordendtier"]
utterance_start_tier_name = args["utterancestarttier"]
utterance_end_tier_name = args["utteranceendtier"]
output_word_times = args["outputwordtimes"]
keep_utterance_times = args["keeputterancetimes"]
reference_tier_name = args["reftier"]
text_tier_name = args["texttier"]
# Compile a regular expression for Toolbox tier and database type names
valid_toolbox_name_re = re.compile(r"^\w+$")
# Make sure that the given reference and text tier names are valid Toolbox tier names
# Check whether the word start tier name is a valid tier name
if not valid_toolbox_name_re.search(reference_tier_name):
print("The reference tier name", reference_tier_name, "is not a valid tier name.")
print("Tier names can only contain ASCII letters, digits and the underscore _.")
print("Tier names cannot contain whitespace.")
sys.exit()
if not valid_toolbox_name_re.search(text_tier_name):
print("The text tier name", reference_tier_name, "is not a valid tier name.")
print("Tier names can only contain ASCII letters, digits and the underscore _.")
print("Tier names cannot contain whitespace.")
sys.exit()
# Print status report
if debug_level == 1:
print("Converting BAS Partitur file", input_file_name, "to Toolbox file", output_file_name, "using the ORT, KAN, and RID tiers from", original_file_name + ".")
if original_toolbox_file_name is not None:
print("Adding the time information to the original Toolbox file", original_toolbox_file_name + ".")
else:
print("Creating a completely new Toolbox file.")
print("Using the reference tier name", reference_tier_name)
print("Using the text tier name", text_tier_name)
# Output word start and end times after the text tier if a new Toolbox file
# is created from scratch
if output_word_times is False:
# Print status
if debug_level == 1:
print("Omitting word start and end times.")
if word_start_tier_name != "WordBegin":
print("Ignoring word start tier name", word_start_tier_name, "because the option outputwordtimes has not been set.")
if word_end_tier_name != "WordEnd":
print("Ignoring word end tier name", word_end_tier_name, "because the option outputwordtimes has not been set.")
else:
# Check whether the word start tier name is a valid tier name
if not valid_toolbox_name_re.search(word_start_tier_name):
print("The word start tier name", word_start_tier_name, "is not a valid tier name.")
print("Tier names can only contain ASCII letters, digits and the underscore _.")
print("Tier names cannot contain whitespace.")
sys.exit()
# Check whether the word end tier name is a valid tier name
if not re.search(r"^\w+$", word_end_tier_name):
print("The word end tier name", word_end_tier_name, "is not a valid tier name.")
print("Tier names can only contain ASCII letters, digits and the underscore _.")
print("Tier names cannot contain whitespace.")
sys.exit()
# Print status message
if debug_level == 1:
print("Also adding tiers for word start and end times to the output Toolbox file.")
print("Using word start tier name", word_start_tier_name)
print("Using word end tier name", word_end_tier_name)
if original_toolbox_file_name is not None:
# If both an original Toolbox file and a Toolbox database type have been specified,
# ignore the latter
if toolbox_type:
if debug_level == 1 and toolbox_type != "Text":
print("Adding information to original Toolbox file", original_toolbox_file_name, " and therefore ignoring the supplied Toolbox database type", toolbox_type + ".")
else:
# If no existing Toolbox file has been provided, make sure that a valid
# Toolbox database type has been supplied
if toolbox_type:
if not re.search(r"^\w+$", toolbox_type):
print(toolbox_type, "is not a valid Toolbox database type name.")
print("Toolbox database type names can only contain ASCII letters, digits and the underscore _.")
print("Toolbox database type names cannot contain whitespace.")
sys.exit()
else:
print("No existing Toolbox file has been supplied.")
print("Therefore you have to provide the name of the Toolbox database type for the newly created Toolbox file.")
sys.exit()
if keep_utterance_times is True:
print("Cannot keep original utterance start and end times when creating a Toolbox file from scratch.")
sys.exit()
# If a wave file was specified, test whether it exists
if "wave" in args and args["wave"] is not None:
wave_file_name = args["wave"]
if os.path.exists(wave_file_name) and os.path.isfile(wave_file_name):
# Try to open it with wave module
wave_file = wave.open(wave_file_name, "r")
# Try to determine its properties
sample_rate = wave_file.getframerate()
else:
wave_file_name = None
if sample_rate is None:
print("You either have to provide the path to the wave file or to specify the sample rate manually.")
sys.exit()
# Function to convert time code hours:minutes:seconds to seconds
# Arguments:
# 1. time code as string
def timecode2seconds(time_code):
# Compile regular expressions for different time format
hours_minutes_seconds_re = re.compile(r"^(\d+):(\d+):(\d+\.\d+)$")
seconds_re = re.compile(r"^(0|(\d+)\.(\d+))$")
# Test what kind of time code we are dealing with
match = hours_minutes_seconds_re.search(time_code)
if match:
hours = match.group(1)
minutes = match.group(2)
seconds = match.group(3)
# Convert complex time code to seconds
try:
seconds = int(hours) * 3600 + int(minutes) * 60 + float(seconds)
except:
print("Could not convert time code", time_code, " to seconds.")
sys.exit()
elif seconds_re.search(time_code):
# Convert simple time code to seconds
try:
seconds = float(time_code)
except:
print("Could not convert time code", time_code, " to seconds.")
sys.exit()
else:
print("Could not match time code", time_code)
sys.exit()
return float(seconds)
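# Worked examples, not part of the original script: the two time code formats
# accepted by timecode2seconds.
def _example_timecode2seconds():
    assert timecode2seconds("0") == 0.0                # plain seconds
    assert timecode2seconds("12.5") == 12.5            # plain seconds with decimals
    assert timecode2seconds("1:02:03.5") == 3723.5     # hours:minutes:seconds
    return True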
# Function to read in the ORT tier from a BAS Partitur file
# Arguments:
# 1. file name
# 2. encoding (defaults to utf-8)
# Returns a list of words as tuples (word_id, word)
def readORTFromOriginalBASFile(file_name, encoding="utf-8"):
bas_file = codecs.open(file_name,"r",encoding)
# Print status message
if debug_level == 1:
print("Extracting ORT tier from original BAS Partitur file", file_name)
# Make a new list of words
words = []
# Count line numbers for error reporting
line_number = 0
# Read the BAS Partitur file line by line
for line in bas_file:
# Increase line number
line_number += 1
# Remove superfluous whitespace
line = line.strip()
# Skip empty lines
if line == "": continue
# Test if the line contains information in the ORT tier
if line.startswith("ORT:"):
# Test whether the line can be divided into 3 elements:
# tier marker, word_id and word
elements = line.split()
if len(elements) != 3:
print("Found an ORT tier that does not contain 3 elements (tier marker, number, phoneme) in line:", line_number)
sys.exit()
# Unpack elements into separate variables
(tier_marker, word_id, word) = elements
            # Append the current word to the list of words
words.append((word_id, word))
# Close the file
bas_file.close()
# Return the list of words
return words
# Function to read in the KAN tier from a BAS Partitur file
# Arguments:
# 1. file name
# 2. encoding (defaults to utf-8)
# Returns a list of words as tuples (word_id, word)
def readKANFromOriginalBASFile(file_name, encoding="utf-8"):
bas_file = codecs.open(file_name,"r",encoding)
# Print status message
if debug_level == 1:
print("Extracting KAN tier from original BAS Partitur file", file_name)
# Make a new list of words
words = []
# Count line numbers for error reporting
line_number = 0
# Read the BAS Partitur file line by line
for line in bas_file:
# Increase line number
line_number += 1
# Remove superfluous whitespace
line = line.strip()
# Skip empty lines
if line == "": continue
        # Test if the line contains information in the KAN tier
if line.startswith("KAN:"):
# Test whether the line can be divided into 3 elements:
# tier marker, word_id and word
elements = line.split()
if len(elements) < 3:
print("Found a KAN tier that does not contain at least 3 elements (tier marker, number, phoneme) in line:", line_number)
sys.exit()
if len(elements) == 3:
# Unpack elements into separate variables
(tier_marker, word_id, word) = elements
else:
# Unpack elements into separate variables
tier_marker = elements.pop(0)
word_id = elements.pop(0)
word = " ".join(elements)
            # Append the current word to the list of words
words.append((word_id, word))
# Close the file
bas_file.close()
# Return the list of words
return words
# Function to read in the MAU tier from a BAS Partitur file
# Arguments:
# 1. file name
# 2. encoding (defaults to utf-8)
# Returns a list of phonemes as tuples (phoneme_id, phoneme)
def readMAUFromBASFile(file_name, encoding="utf-8"):
bas_file = codecs.open(file_name,"r",encoding)
# Print status message
if debug_level == 1:
print("Extracting MAU tier from BAS Partitur file", file_name)
# Make a new list of words
phonemes = []
# Count line numbers for error reporting
line_number = 0
# Read the BAS Partitur file line by line
for line in bas_file:
# Increase line number
line_number += 1
# Remove superfluous whitespace
line = line.strip()
# Skip empty lines
if line == "": continue
        # Test if the line contains information in the MAU tier
if line.startswith("MAU:"):
# Test whether the line can be divided into 5 elements:
# tier marker, start, duration, word_id, and phoneme
elements = line.split()
if len(elements) != 5:
print("Found a MAU tier that does not contain 5 elements (tier marker, start time, duration, word id, phoneme) in line:", line_number)
sys.exit()
# Unpack elements into separate variables
(tier_marker, start, duration, word_id, phoneme) = elements
            # Append the current phoneme to the list of phonemes
phonemes.append((start, duration, word_id, phoneme))
# Close the file
bas_file.close()
# Return the list of phonemes
return phonemes
# Function to read in the RID tier from a BAS Partitur file
# Arguments:
# 1. file name
# 2. encoding (defaults to utf-8)
# Returns a list of utterances as lists of [utterance_id, list of word_ids]
def readRIDFromOriginalBASFile(file_name, encoding="utf-8"):
bas_file = codecs.open(file_name,"r",encoding)
# Print status message
if debug_level == 1:
print("Extracting RID tier from Original BAS Partitur file", file_name)
# Make a new list of words
utterances = []
# Count line numbers for error reporting
line_number = 0
# Read the BAS Partitur file line by line
for line in bas_file:
# Increase line number
line_number += 1
# Remove superfluous whitespace
line = line.strip()
# Skip empty lines
if line == "": continue
        # Test if the line contains information in the RID tier
        if line.startswith("RID:"):
            # Test whether the line can be divided into 3 elements:
            # tier marker, word ids, and utterance id
elements = line.split()
if len(elements) < 3:
print("Found a RID tier that does not contain at least 3 elements (tier marker, word ids, utterance id) in line:", line_number)
sys.exit()
elif len(elements) == 3:
# Unpack elements into separate variables
(tier_marker, word_ids, utterance_id) = elements
else:
tier_marker = elements[0]
word_ids = elements[1]
utterance_id = " ".join(elements[2:])
# Split the word ids
list_of_word_ids = word_ids.split(",")
# Append the current utterance into the list of utterances
utterances.append([utterance_id, list_of_word_ids])
# Close the file
bas_file.close()
# Return the list of utterances
return utterances
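# Illustrative sketch, not part of the original script: the parser above
# accepts lines of the form "RID: <comma-separated word ids> <utterance id>".
# The sample content and utterance ids below are made up purely to show the
# resulting data structure.
def _example_read_rid():
    import tempfile
    sample = "RID:\t0,1,2\tutt_001\nRID:\t3,4\tutt_002\n"
    with tempfile.NamedTemporaryFile("w", suffix=".par", delete=False,
                                     encoding="utf-8") as tmp:
        tmp.write(sample)
        tmp_name = tmp.name
    utterances = readRIDFromOriginalBASFile(tmp_name, "utf-8")
    os.remove(tmp_name)
    # -> [['utt_001', ['0', '1', '2']], ['utt_002', ['3', '4']]]
    return utterances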
# Function to combine the start and end times of phonemes into those for words
# Argument:
# 1. A list of phonemes as created by readMAUFromBASFile
# returns a dictionary from word ids to pairs of (start_time, end_time)
def combinePhonemesIntoWords(phonemes):
# Print status report
if debug_level == 1:
print("Combining phoneme start and end times into word start and end times.")
# Dictionary of word ids
word_ids = {}
# Go through list of phonemes
for (start, duration, word_id, phoneme) in phonemes:
# Ignore pauses, etc.
if word_id == "-1":
continue
# Determine whether phonemes of the current word have already been processed
if word_id in word_ids:
# Old start and end time
(old_start_time, old_end_time) = word_ids[word_id]
# Calculate the start and end times of the current phoneme
cur_start_time = int(start)
cur_end_time = int(start) + int(duration)
# Is the current phoneme's start time lower than the old word start time?
if cur_start_time < old_start_time:
new_start_time = cur_start_time
else:
new_start_time = old_start_time
# Is the current phoneme's end time higher than the old word end time?
if cur_end_time > old_end_time:
new_end_time = cur_end_time
else:
new_end_time = old_end_time
# Put updated start and end time into dictionary
word_ids[word_id] = (new_start_time, new_end_time)
else:
new_start_time = int(start)
new_end_time = int(start) + int(duration)
# Put initial start and end time into dictionary
word_ids[word_id] = (new_start_time, new_end_time)
# Return the dictionary of start and end times for words
return word_ids
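# Worked example, not part of the original script: three phoneme intervals
# belonging to word id "1" are merged into a single (start, end) pair in
# samples, and pauses (word id -1) are skipped. The values are made up.
def _example_combine_phonemes():
    phonemes = [("1000", "200", "1", "a"),
                ("1200", "300", "1", "b"),
                ("1500", "250", "1", "c"),
                ("0", "500", "-1", "<p:>")]
    return combinePhonemesIntoWords(phonemes)  # {'1': (1000, 1750)}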
# Function to combine the start and end times of words into those for utterances
# Arguments:
# 1. A list of utterances as created by readRIDFromOriginalBASFile
# 2. A dictionary of word start and end times as created by combinePhonemesIntoWords
# returns a dictionary from utterance ids to pairs of (start_time, end_time)
def combineWordsIntoUtterances(utterances, words):
# Print status report
if debug_level == 1:
print("Combining word start and end times into utterance start and end times.")
# Dictionary of utterance ids
utterance_ids = {}
# Test
# first_word_id = None
# last_word_id = None
# first_word_start_time = None
# last_word_end_time = None
    # Go through the list of utterances
for utterance in utterances:
utterance_id = utterance[0]
list_of_word_ids = utterance[1]
# print("Utterance id is", utterance_id)
# print("List of word ids is", list_of_word_ids)
        # Look up the ids of the first and last words in the utterance
first_word_id = list_of_word_ids[0]
last_word_id = list_of_word_ids[-1]
# Determine the start and end times of these words
if first_word_id in words:
(first_word_start_time, first_word_end_time) = words[first_word_id]
else:
print("Could not find word id", first_word_id, "contained in utterance id", utterance_id)
# sys.exit()
if last_word_id in words:
(last_word_start_time, last_word_end_time) = words[last_word_id]
else:
print("Could not find word id", last_word_id, "contained in utterance id", utterance_id)
# sys.exit()
# Combine start time of first word and end time of last word into
# utterance start and end times
utterance_start_time = first_word_start_time
utterance_end_time = last_word_end_time
# Put the utterance start and end times into the utterance dictionary
utterance_ids[utterance_id] = (utterance_start_time, utterance_end_time)
# Return the dictionary of start and end times for utterances
return utterance_ids
# Function to produce a dictionary from word ids to the orthographic forms of words
# Argument:
# 1. A list of words as produced by readORTFromOriginalBASFile
# returns a dictionary from word ids to orthographic word forms
def makeWordDictionary(list_of_words):
# A dictionary of words
word_dict = {}
# Go through the list of words
for (word_id, word) in list_of_words:
# Put the word into the dictionary
word_dict[word_id] = word
# Return the dictionary
return word_dict
# Function to produce a dictionary from utterance ids to the ids
# of the words contained in the utterance (stored as a list)
# Argument:
# 1. A list of utterances as produced by readRIDFromOriginalBASFile
# returns a dictionary from utterance ids to the word contained in that utterance
def makeUtteranceDictionary(list_of_utterances):
# A dictionary of utterances
utterance_dict = {}
# Go through the list of utterances
for (utterance_id, words) in list_of_utterances:
# Put the list of word ids into the dictionary under the utterance id
utterance_dict[utterance_id] = words
# Return the dictionary
return utterance_dict
# Function to read in an existing Toolbox file
# Arguments:
# 1. file name
# 2. encoding (defaults to utf-8)
# Returns a list of Toolbox lines as tuples (tier marker, line, line ending)
def readToolboxFile(file_name, encoding="utf-8"):
# Print status message
if debug_level == 1:
print("Reading original Toolbox file", file_name)
# Compile a regular expression to find Toolbox tier markers
tier_marker_re = re.compile("^" + r"\\(\S+)(?=($|\s+))")
# Compile a regular expression to find line endings
line_ending_re = re.compile(r"([\r\n]+)$")
# A list of Toolbox lines
toolbox_lines = []
# Open Toolbox file
toolbox_file = codecs.open(file_name, "r", encoding)
# Go through the lines in the file
for line in toolbox_file:
# Tier marker in current line
cur_tier_marker = None
# Line ending in current line
cur_line_ending = ""
# Search for a tier marker in the current line
match = tier_marker_re.search(line)
if match:
cur_tier_marker = match.group(1)
# Search for a line ending in the current line
match = line_ending_re.search(line)
if match:
cur_line_ending = match.group(1)
# Put together tuple for the current line
cur_line_tuple = (cur_tier_marker, line, cur_line_ending)
# Add current line to the list of lines
toolbox_lines.append(cur_line_tuple)
# Return the list of lines
return toolbox_lines
# Function to annotate an original Toolbox file with additional time information
# Arguments:
# 1. The output file name
# 2. The output file encoding
# 3. The original Toolbox file as read in by readToolboxFile
# 4. The name of the reference tier
# 5. Whether to keep the original utterance start and end times or not (Boolean)
# 6. Whether to output word start and end times or not (Boolean)
# 7. The utterance start and end times (as a dictionary from utterance id to (start, end)
# 8. The Toolbox marker for utterance start times
# 9. The Toolbox marker for utterance end times
# 10. The word start and end times (as a dictionary from word id to (start, end)
# 11. The Toolbox marker for word start times
# 12. The Toolbox marker for word end times
# 13. A dictionary from utterance ids to word ids contained in them as produced by makeUtteranceDictionary
# 14. A dictionary from utterance ids to the original utterance start and end times
# 15. The sample rate to be used to convert samples to seconds
def annotateOriginalToolboxFile(output_file_name, output_encoding, original_toolbox_file, reference_tier_name, keep_utterance_times, output_word_times, utterance_times, utterance_start_marker, utterance_end_marker, word_times, word_start_marker, word_end_marker, utterance_dict, original_utterance_times_dict, sample_rate):
# Compile a regular expression to extract the tier contents
tier_contents_re = re.compile("^" + r"\\(\S+)\s+(.+)$")
# Check that the reference marker actually occurs in the file
reference_tier_encountered = False
# Test
# first_word_start_time = None
# last_word_end_time = None
for line in original_toolbox_file:
# Unpack line contents
(cur_toolbox_marker, cur_line, cur_line_ending) = line
if cur_toolbox_marker == reference_tier_name:
reference_tier_encountered = True
if reference_tier_encountered is False:
print("The supplied reference tier marker", reference_tier_name, "does not occur in the original Toolbox file.")
sys.exit()
# Count line numbers for error reporting
line_number = 0
# Remember whether utterance times were output
utterance_times_output = True
cur_utterance_id = None
# Open the output file
output_file = codecs.open(output_file_name, "w", output_encoding)
# Go through all lines in the original Toolbox file
for line in original_toolbox_file:
# Increase line number
line_number += 1
# Unpack line contents
(cur_toolbox_marker, cur_line, cur_line_ending) = line
# Flags indicating whether utterance times have been output
utterance_start_time_seconds = None
utterance_end_time_seconds = None
# Check whether we have found the reference tier
if cur_toolbox_marker == reference_tier_name:
# Only utterance start time or utterance end time were output
# but not both
if utterance_times_output == "start" or utterance_times_output == "end":
utterance_times_output = False
# Check whether utterance times were output for preceding utterance
if utterance_times_output is False and cur_utterance_id is not None:
print("Could not output any utterance times for utterance", cur_utterance_id)
# sys.exit()
# No utterance times output for current utterance yet
utterance_times_output = False
# Remember start time of first word and end time of last word
# to perform sanity checks
first_word_start_time = None
last_word_end_time = None
# Extract the contents of the reference tier
match = tier_contents_re.search(cur_line)
if match:
cur_utterance_id = match.group(2).strip()
else:
print("Something is wrong. I cannot extract the reference from the reference tier in line " + str(line_number) +".")
print(line)
sys.exit()
# Output the current reference tier line
output_file.write(cur_line)
# Try to find the utterance id in the dictionary with utterance
# start and end times
if cur_utterance_id in utterance_times:
utterance_start_time = utterance_times[cur_utterance_id][0]
utterance_end_time = utterance_times[cur_utterance_id][1]
# Calculate start time in seconds
utterance_start_time_seconds = round(utterance_start_time / sample_rate, 3)
# Calculate end time in seconds
utterance_end_time_seconds = round(utterance_end_time / sample_rate, 3)
# If the original utterance are to be overwritten
if keep_utterance_times is False:
# Output the current utterance start time
output_line = "\\" + utterance_start_marker + " " + "%.3f" % utterance_start_time_seconds + cur_line_ending
output_file.write(output_line)
# Output the current utterance end time
output_line = "\\" + utterance_end_marker + " " + "%.3f" % utterance_end_time_seconds + cur_line_ending
output_file.write(output_line)
# Remember that utterance times were output for current utterance
utterance_times_output = True
# If word times should be output, too
if output_word_times:
# Could all word times be output
erroneous_unit = False
# Look up word ids
if cur_utterance_id in utterance_dict:
cur_words = utterance_dict[cur_utterance_id]
# Lists of word start and end times
word_start_times = []
word_end_times = []
# Go through words
for word in cur_words:
# Look up that word's start and end times
if word in word_times:
word_start_time = word_times[word][0]
word_end_time = word_times[word][1]
# Calculate start time in seconds
word_start_time_seconds = round(word_start_time / sample_rate, 3)
# Calculate end time in seconds
word_end_time_seconds = round(word_end_time / sample_rate, 3)
# Add them to the lists after converting them to strings
word_start_times.append("%.3f" % word_start_time_seconds)
word_end_times.append("%.3f" % word_end_time_seconds)
# Remember word times for sanity checks
if first_word_start_time is None:
first_word_start_time = word_start_time_seconds
last_word_end_time = word_end_time_seconds
else:
print("Could not find word start or end time for word", word + ".")
erroneous_unit = True
# All word times were output correctly?
if erroneous_unit is False:
# Output the start times of the words in the current utterance
output_line = "\\" + word_start_marker + " " + " ".join(word_start_times) + cur_line_ending
output_file.write(output_line)
# Output the end times of the words in the current utterance
output_line = "\\" + word_end_marker + " " + " ".join(word_end_times) + cur_line_ending
output_file.write(output_line)
# Output regular intervals
else:
if cur_utterance_id in original_utterance_times_dict:
if "start" in original_utterance_times_dict[cur_utterance_id] and "end" in original_utterance_times_dict[cur_utterance_id]:
original_utterance_start_time_seconds = original_utterance_times_dict[cur_utterance_id]["start"]
original_utterance_end_time_seconds = original_utterance_times_dict[cur_utterance_id]["end"]
else:
print("Could not determine original utterance start and end times for erroneous utterance", cur_utterance_id)
sys.exit()
else:
print("Could not determine original utterance start and end times for erroneous utterance", cur_utterance_id)
sys.exit()
number_of_words = len(cur_words)
utterance_length = original_utterance_end_time_seconds - original_utterance_start_time_seconds
word_length = utterance_length/number_of_words
word_start_times = []
word_end_times = []
for index in range(number_of_words):
word_start_time_seconds = original_utterance_start_time_seconds + index * word_length + 0.010
word_end_time_seconds = original_utterance_start_time_seconds + (index + 1) * word_length - 0.010
# Add them to the lists after converting them to strings
word_start_times.append("%.3f" % word_start_time_seconds)
word_end_times.append("%.3f" % word_end_time_seconds)
# Output the start times of the words in the current utterance
output_line = "\\" + word_start_marker + " " + " ".join(word_start_times) + cur_line_ending
output_file.write(output_line)
# Output the end times of the words in the current utterance
output_line = "\\" + word_end_marker + " " + " ".join(word_end_times) + cur_line_ending
output_file.write(output_line)
print("Outputting regular intervals for utterance", cur_utterance_id)
if (keep_utterance_times is False) and (utterance_start_time_seconds is not None) and (utterance_end_time_seconds is not None):
if utterance_start_time_seconds > first_word_start_time:
print("Start time of first word in the utterance is before start time of the utterance.")
print("Start time of utterance:", utterance_start_time_seconds)
print("Start time of first word:", first_word_start_time)
sys.exit()
if utterance_end_time_seconds < last_word_end_time:
print("End time of last word in the utterance is after end time of the utterance.")
print("End time of utterance:", utterance_end_time_seconds)
print("End time of last word:", last_word_end_time)
sys.exit()
else:
print("No word times output for empty utterance", cur_utterance_id)
else:
# Utterance contains no words
if cur_utterance_id not in utterance_dict:
utterance_start_time_seconds = None
utterance_end_time_seconds = None
# Warning
print("Warning: Could not determine utterance start and end times for empty utterance", cur_utterance_id)
print("Will keep original start and end times if present.")
else:
print("Could not determine utterance start and end times for utterance", cur_utterance_id)
sys.exit()
# Toolbox line except for the reference line encountered
else:
# Normally ignore the original utterance start and end markers
# unless the current utterance is empty
if (cur_toolbox_marker == utterance_start_marker) or (cur_toolbox_marker == utterance_end_marker):
# Current utterance seems to be empty
# Therefore output original utterance start or end
# marker anyway
if keep_utterance_times is True or utterance_times_output is not True:
output_file.write(cur_line)
# Check that word times are within utterance times
if cur_toolbox_marker == utterance_start_marker:
cur_line_contents = cur_line.strip().split()[-1]
try:
utterance_start_time_seconds = timecode2seconds(cur_line_contents)
except:
print("Could not determine utterance start time from existing utterance time tier.")
print("Current utterance", cur_utterance_id)
print(cur_line)
sys.exit()
if first_word_start_time is not None:
if utterance_start_time_seconds > first_word_start_time:
print("Start time of first word in the utterance is before start time of the utterance.")
print("Start time of utterance:", utterance_start_time_seconds)
print("Start time of first word:", first_word_start_time)
sys.exit()
# Remember that utterance times were output for current utterance
if utterance_times_output == "end":
utterance_times_output = True
else:
utterance_times_output = "start"
if cur_toolbox_marker == utterance_end_marker:
cur_line_contents = cur_line.strip().split()[-1]
try:
utterance_end_time_seconds = timecode2seconds(cur_line_contents)
except:
print("Could not determine utterance end time from existing utterance time tier.")
print("Current utterance", cur_utterance_id)
print(cur_line)
sys.exit()
if last_word_end_time is not None:
if utterance_end_time_seconds < last_word_end_time:
print("End time of last word in the utterance is after end time of the utterance.")
print("End time of utterance:", utterance_end_time_seconds)
print("End time of last word:", last_word_end_time)
sys.exit()
# Remember that utterance times were output for current utterance
if utterance_times_output == "start":
utterance_times_output = True
else:
utterance_times_output = "end"
# Output any other lines unchanged
else:
output_file.write(cur_line)
# Close the output file
output_file.close()
# Function to write a new Toolbox file from scratch
# Arguments:
# 1. The output file name
# 2. The output file encoding
# 3. The name of the reference tier
# 4. The name of the text tier
# 5. The name of the Toolbox database type
# 6. Whether to output word start and end times or not (Boolean)
# 7. The utterances from the BAS Partitur file as read in by readRIDFromOriginalBASFile
# 8. The utterance start and end times (as a dictionary from utterance id to (start, end)
# 9. The Toolbox marker for utterance start times
# 10. The Toolbox marker for utterance end times
# 11. The word start and end times (as a dictionary from word id to (start, end)
# 12. The Toolbox marker for word start times
# 13. The Toolbox marker for word end times
# 14. A dictionary from word ids to orthographic word forms
# 15. The sample rate to be used to convert samples to seconds
def writeNewToolboxFile(output_file_name, output_encoding, reference_tier_name, text_tier_name, toolbox_type, output_word_times, utterances, utterance_times, utterance_start_marker, utterance_end_marker, word_times, word_start_marker, word_end_marker, word_dict, sample_rate):
# Open the output file
output_file = codecs.open(output_file_name, "w", output_encoding)
# Use Windows line endings \r\n throughout because Toolbox
# is a Windows program
# Write the Toolbox header
# TODO: Look up what the number in the Toolbox header means!
output_line = "\\_sh v3.0 400 " + toolbox_type + "\r\n"
output_file.write(output_line)
# Output empty line
output_file.write("\r\n")
# Go through all utterances
for utterance in utterances:
# Unpack values
(utterance_id, words) = utterance
# Output the reference tier with the utterance ID
output_line = "\\" + reference_tier_name + " " + utterance_id + "\r\n"
output_file.write(output_line)
# Output the utterance start and end time
if utterance_id in utterance_times:
utterance_start_time = utterance_times[utterance_id][0]
utterance_end_time = utterance_times[utterance_id][1]
# Calculate start time in seconds
utterance_start_time_seconds = round(utterance_start_time / sample_rate, 3)
# Calculate end time in seconds
utterance_end_time_seconds = round(utterance_end_time / sample_rate, 3)
# Output the current utterance start time
output_line = "\\" + utterance_start_marker + " " + "%.3f" % utterance_start_time_seconds + "\r\n"
output_file.write(output_line)
# Output the current utterance end time
output_line = "\\" + utterance_end_marker + " " + "%.3f" % utterance_end_time_seconds + "\r\n"
output_file.write(output_line)
else:
print("Could not determine utterance start and end times for utterance", utterance_id)
sys.exit()
# Build information about the words in the utterance
word_forms = []
for word in words:
# Look up the word form
if word in word_dict:
word_form = word_dict[word]
word_forms.append(word_form.strip())
else:
print("Could not determine orthographic word form for word", word + ".")
sys.exit()
# Build text tier line
text_line = "\\" + text_tier_name + " " + " ".join(word_forms) + "\r\n"
# Output the text tier directly if no word start and end times
# are output
if output_word_times is False:
# Output empty line
output_file.write("\r\n")
# Output text tier
output_file.write(text_line)
# Should word start and end times also be output?
else:
word_start_times = []
word_end_times = []
for word in words:
# Look up the start and end times
if word in word_times:
word_start_time = word_times[word][0]
word_end_time = word_times[word][1]
# Calculate start time in seconds
word_start_time_seconds = round(word_start_time / sample_rate, 3)
# Calculate end time in seconds
word_end_time_seconds = round(word_end_time / sample_rate, 3)
# Add them to the lists after converting them to strings
word_start_times.append("%.3f" % word_start_time_seconds)
word_end_times.append("%.3f" % word_end_time_seconds)
else:
print("Could not find word start or end time for word", word + ".")
sys.exit()
# Output tiers for word start and end times
output_line = "\\" + word_start_marker + " " + " ".join(word_start_times) + "\r\n"
output_file.write(output_line)
output_line = "\\" + word_end_marker + " " + " ".join(word_end_times) + "\r\n"
output_file.write(output_line)
# Output empty line
output_file.write("\r\n")
# Output text tier
output_file.write(text_line)
# Output empty lines
output_file.write("\r\n")
output_file.write("\r\n")
# Close the output file
output_file.close()
def readUtteranceTimesFromOriginalToolboxFile(toolbox_file, reference_tier_name, utterance_start_tier_name, utterance_end_tier_name):
cur_utterance_id = None
original_utterance_times = dict()
for line in toolbox_file:
# Unpack line contents
(cur_toolbox_marker, cur_line, cur_line_ending) = line
# Reference tier?
if cur_toolbox_marker == reference_tier_name:
cur_utterance_id = None
# Remember current utterance id
cur_utterance_id = cur_line.strip()
match = re.search(r"\s+(.+)$", cur_utterance_id)
if match:
cur_utterance_id = match.group(1)
# Extract utterance start time
if cur_toolbox_marker == utterance_start_tier_name:
cur_utterance_start_time = cur_line.strip().split()[-1]
cur_utterance_start_time_seconds = timecode2seconds(cur_utterance_start_time)
if cur_utterance_id is not None:
if cur_utterance_id not in original_utterance_times:
original_utterance_times[cur_utterance_id] = {}
original_utterance_times[cur_utterance_id]["start"] = cur_utterance_start_time_seconds
cur_utterance_start_time = None
cur_utterance_start_time_seconds = None
# Extract utterance end time
if cur_toolbox_marker == utterance_end_tier_name:
cur_utterance_end_time = cur_line.strip().split()[-1]
cur_utterance_end_time_seconds = timecode2seconds(cur_utterance_end_time)
if cur_utterance_id is not None:
if cur_utterance_id not in original_utterance_times:
original_utterance_times[cur_utterance_id] = {}
original_utterance_times[cur_utterance_id]["end"] = cur_utterance_end_time_seconds
cur_utterance_end_time = None
cur_utterance_end_time_seconds = None
return original_utterance_times
# Read in the ORT tier from the original BAS Partitur file
ort_tier = readORTFromOriginalBASFile(original_file_name, original_encoding)
# Read in the KAN tier from the original BAS Partitur file
kan_tier = readKANFromOriginalBASFile(original_file_name, original_encoding)
# Read in the RID tier from the original BAS Partitur file
rid_tier = readRIDFromOriginalBASFile(original_file_name, original_encoding)
# Read in the MAU tier from the BAS Partitur file
mau_tier = readMAUFromBASFile(input_file_name, input_encoding)
# Combine phoneme start and end times into word start and end times
word_times = combinePhonemesIntoWords(mau_tier)
# Combine word start and end times into utterance start and end times
utterance_times = combineWordsIntoUtterances(rid_tier, word_times)
# Make a dictionary from word ids to word forms
word_dict = makeWordDictionary(ort_tier)
# Make a dictionary from utterance ids to the words contained in the utterances
utterance_dict = makeUtteranceDictionary(rid_tier)
# Print status message
if debug_level == 1:
print("Writing Toolbox file.")
# Add time annotation to an original Toolbox file
if original_toolbox_file_name:
original_toolbox_file = readToolboxFile(original_toolbox_file_name, toolbox_encoding)
original_utterance_times_dict = readUtteranceTimesFromOriginalToolboxFile(original_toolbox_file, reference_tier_name, utterance_start_tier_name, utterance_end_tier_name)
annotateOriginalToolboxFile(output_file_name, output_encoding, original_toolbox_file, reference_tier_name, keep_utterance_times, output_word_times, utterance_times, utterance_start_tier_name, utterance_end_tier_name, word_times, word_start_tier_name, word_end_tier_name, utterance_dict, original_utterance_times_dict, sample_rate)
# Write a new Toolbox file from scratch
else:
writeNewToolboxFile(output_file_name, output_encoding, reference_tier_name, text_tier_name, toolbox_type, output_word_times, rid_tier, utterance_times, utterance_start_tier_name, utterance_end_tier_name, word_times, word_start_tier_name, word_end_tier_name, word_dict, sample_rate)
if debug_level == 1:
print("Done.")
| {
"content_hash": "2f990e31b9a6d1d1ee1568869e7e614a",
"timestamp": "",
"source": "github",
"line_count": 1270,
"max_line_length": 334,
"avg_line_length": 42.18503937007874,
"alnum_prop": 0.5780867942137191,
"repo_name": "janstrunk/LangDocMAUS",
"id": "64b18c2eff5f996c87f363a35de557df3c97c8f4",
"size": "56107",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "MAU2Toolbox.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "2339"
},
{
"name": "Python",
"bytes": "168231"
}
],
"symlink_target": ""
} |
from plotly.basedatatypes import BaseLayoutHierarchyType as _BaseLayoutHierarchyType
import copy as _copy
class Domain(_BaseLayoutHierarchyType):
# class properties
# --------------------
_parent_path_str = "layout.ternary"
_path_str = "layout.ternary.domain"
_valid_props = {"column", "row", "x", "y"}
# column
# ------
@property
def column(self):
"""
If there is a layout grid, use the domain for this column in
the grid for this ternary subplot .
The 'column' property is a integer and may be specified as:
- An int (or float that will be cast to an int)
in the interval [0, 9223372036854775807]
Returns
-------
int
"""
return self["column"]
@column.setter
def column(self, val):
self["column"] = val
# row
# ---
@property
def row(self):
"""
If there is a layout grid, use the domain for this row in the
grid for this ternary subplot .
The 'row' property is a integer and may be specified as:
- An int (or float that will be cast to an int)
in the interval [0, 9223372036854775807]
Returns
-------
int
"""
return self["row"]
@row.setter
def row(self, val):
self["row"] = val
# x
# -
@property
def x(self):
"""
Sets the horizontal domain of this ternary subplot (in plot
fraction).
The 'x' property is an info array that may be specified as:
* a list or tuple of 2 elements where:
(0) The 'x[0]' property is a number and may be specified as:
- An int or float in the interval [0, 1]
(1) The 'x[1]' property is a number and may be specified as:
- An int or float in the interval [0, 1]
Returns
-------
list
"""
return self["x"]
@x.setter
def x(self, val):
self["x"] = val
# y
# -
@property
def y(self):
"""
Sets the vertical domain of this ternary subplot (in plot
fraction).
The 'y' property is an info array that may be specified as:
* a list or tuple of 2 elements where:
(0) The 'y[0]' property is a number and may be specified as:
- An int or float in the interval [0, 1]
(1) The 'y[1]' property is a number and may be specified as:
- An int or float in the interval [0, 1]
Returns
-------
list
"""
return self["y"]
@y.setter
def y(self, val):
self["y"] = val
# Self properties description
# ---------------------------
@property
def _prop_descriptions(self):
return """\
column
If there is a layout grid, use the domain for this
column in the grid for this ternary subplot .
row
If there is a layout grid, use the domain for this row
in the grid for this ternary subplot .
x
Sets the horizontal domain of this ternary subplot (in
plot fraction).
y
Sets the vertical domain of this ternary subplot (in
plot fraction).
"""
def __init__(self, arg=None, column=None, row=None, x=None, y=None, **kwargs):
"""
Construct a new Domain object
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of
:class:`plotly.graph_objs.layout.ternary.Domain`
column
If there is a layout grid, use the domain for this
column in the grid for this ternary subplot .
row
If there is a layout grid, use the domain for this row
in the grid for this ternary subplot .
x
Sets the horizontal domain of this ternary subplot (in
plot fraction).
y
Sets the vertical domain of this ternary subplot (in
plot fraction).
Returns
-------
Domain
"""
super(Domain, self).__init__("domain")
if "_parent" in kwargs:
self._parent = kwargs["_parent"]
return
# Validate arg
# ------------
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError(
"""\
The first argument to the plotly.graph_objs.layout.ternary.Domain
constructor must be a dict or
an instance of :class:`plotly.graph_objs.layout.ternary.Domain`"""
)
# Handle skip_invalid
# -------------------
self._skip_invalid = kwargs.pop("skip_invalid", False)
self._validate = kwargs.pop("_validate", True)
# Populate data dict with properties
# ----------------------------------
_v = arg.pop("column", None)
_v = column if column is not None else _v
if _v is not None:
self["column"] = _v
_v = arg.pop("row", None)
_v = row if row is not None else _v
if _v is not None:
self["row"] = _v
_v = arg.pop("x", None)
_v = x if x is not None else _v
if _v is not None:
self["x"] = _v
_v = arg.pop("y", None)
_v = y if y is not None else _v
if _v is not None:
self["y"] = _v
# Process unknown kwargs
# ----------------------
self._process_kwargs(**dict(arg, **kwargs))
# Reset skip_invalid
# ------------------
self._skip_invalid = False
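# Illustrative usage sketch (not part of the generated module): two equivalent ways to
# place a ternary subplot, either by explicit plot fractions or by a layout-grid cell.
def _example_domain():
    explicit = Domain(x=[0, 0.5], y=[0, 1])   # left half of the figure
    grid_cell = Domain(row=0, column=0)       # first cell of a layout grid
    return explicit, grid_cell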
| {
"content_hash": "8fb04e311cbdd889699b04d25b43036c",
"timestamp": "",
"source": "github",
"line_count": 207,
"max_line_length": 84,
"avg_line_length": 28.106280193236714,
"alnum_prop": 0.5106565830182194,
"repo_name": "plotly/python-api",
"id": "e17b94fc5184e78215f7b463e0517affafba1173",
"size": "5818",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "packages/python/plotly/plotly/graph_objs/layout/ternary/_domain.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "JavaScript",
"bytes": "6870"
},
{
"name": "Makefile",
"bytes": "1708"
},
{
"name": "Python",
"bytes": "823245"
},
{
"name": "Shell",
"bytes": "3238"
}
],
"symlink_target": ""
} |
import cairocffi
from . import base
from os import statvfs
import time
__all__ = [
'CPUGraph',
'MemoryGraph',
'SwapGraph',
'NetGraph',
'HDDGraph',
'HDDBusyGraph',
]
class _Graph(base._Widget):
fixed_upper_bound = False
defaults = [
("graph_color", "18BAEB", "Graph color"),
("fill_color", "1667EB.3", "Fill color for linefill graph"),
("border_color", "215578", "Widget border color"),
("border_width", 2, "Widget border width"),
("margin_x", 3, "Margin X"),
("margin_y", 3, "Margin Y"),
("samples", 100, "Count of graph samples."),
("frequency", 1, "Update frequency in seconds"),
("type", "linefill", "'box', 'line', 'linefill'"),
("line_width", 3, "Line width"),
("start_pos", "bottom", "Drawer starting position ('bottom'/'top')"),
]
def __init__(self, width=100, **config):
base._Widget.__init__(self, width, **config)
self.add_defaults(_Graph.defaults)
self.values = [0] * self.samples
self.maxvalue = 0
self.oldtime = time.time()
self.lag_cycles = 0
def timer_setup(self):
self.timeout_add(self.frequency, self.update)
@property
def graphwidth(self):
return self.width - self.border_width * 2 - self.margin_x * 2
@property
def graphheight(self):
return self.bar.height - self.margin_y * 2 - self.border_width * 2
def draw_box(self, x, y, values):
step = self.graphwidth / float(self.samples)
self.drawer.set_source_rgb(self.graph_color)
for val in values:
val = self.val(val)
self.drawer.fillrect(x, y - val, step, val)
x += step
def draw_line(self, x, y, values):
step = self.graphwidth / float(self.samples - 1)
self.drawer.ctx.set_line_join(cairocffi.LINE_JOIN_ROUND)
self.drawer.set_source_rgb(self.graph_color)
self.drawer.ctx.set_line_width(self.line_width)
for val in values:
self.drawer.ctx.line_to(x, y - self.val(val))
x += step
self.drawer.ctx.stroke()
def draw_linefill(self, x, y, values):
step = self.graphwidth / float(self.samples - 2)
self.drawer.ctx.set_line_join(cairocffi.LINE_JOIN_ROUND)
self.drawer.set_source_rgb(self.graph_color)
self.drawer.ctx.set_line_width(self.line_width)
for index, val in enumerate(values):
self.drawer.ctx.line_to(x + index * step, y - self.val(val))
self.drawer.ctx.stroke_preserve()
self.drawer.ctx.line_to(
x + (len(values) - 1) * step,
y - 1 + self.line_width / 2.0
)
self.drawer.ctx.line_to(x, y - 1 + self.line_width / 2.0)
self.drawer.set_source_rgb(self.fill_color)
self.drawer.ctx.fill()
def val(self, val):
if self.start_pos == 'bottom':
return val
elif self.start_pos == 'top':
return -val
else:
raise ValueError("Unknown starting position: %s." % self.start_pos)
def draw(self):
self.drawer.clear(self.background or self.bar.background)
if self.border_width:
self.drawer.set_source_rgb(self.border_color)
self.drawer.ctx.set_line_width(self.border_width)
self.drawer.ctx.rectangle(
self.margin_x + self.border_width / 2.0,
self.margin_y + self.border_width / 2.0,
self.graphwidth + self.border_width,
self.bar.height - self.margin_y * 2 - self.border_width,
)
self.drawer.ctx.stroke()
x = self.margin_x + self.border_width
y = self.margin_y + self.border_width
if self.start_pos == 'bottom':
y += self.graphheight
elif not self.start_pos == 'top':
raise ValueError("Unknown starting position: %s." % self.start_pos)
k = 1.0 / (self.maxvalue or 1)
scaled = [self.graphheight * val * k for val in reversed(self.values)]
if self.type == "box":
self.draw_box(x, y, scaled)
elif self.type == "line":
self.draw_line(x, y, scaled)
elif self.type == "linefill":
self.draw_linefill(x, y, scaled)
else:
raise ValueError("Unknown graph type: %s." % self.type)
self.drawer.draw(offsetx=self.offset, width=self.width)
def push(self, value):
if self.lag_cycles > self.samples:
# compensate lag by sending the same value up to
# the graph samples limit
self.lag_cycles = 1
self.values = ([value] * min(self.samples, self.lag_cycles)) + self.values
self.values = self.values[:self.samples]
if not self.fixed_upper_bound:
self.maxvalue = max(self.values)
self.draw()
def update(self):
# lag detection
newtime = time.time()
self.lag_cycles = int((newtime - self.oldtime) / self.frequency)
self.oldtime = newtime
self.update_graph()
self.timeout_add(self.frequency, self.update)
def fullfill(self, value):
self.values = [value] * len(self.values)
class CPUGraph(_Graph):
"""
Display CPU usage graph.
"""
orientations = base.ORIENTATION_HORIZONTAL
defaults = [
("core", "all", "Which core to show (all/0/1/2/...)"),
]
fixed_upper_bound = True
def __init__(self, **config):
_Graph.__init__(self, **config)
self.add_defaults(CPUGraph.defaults)
self.maxvalue = 100
self.oldvalues = self._getvalues()
def _getvalues(self):
with open('/proc/stat') as file:
lines = file.readlines()
# default to all cores (first line)
line = lines.pop(0)
# core specified, grab the corresponding line
if isinstance(self.core, int):
# we already removed the first line from the list,
# so it's 0 indexed now :D
line = lines[self.core]
if not line.startswith("cpu%s" % self.core):
raise ValueError("No such core: %s" % self.core)
name, user, nice, sys, idle, iowait, tail = line.split(None, 6)
return (int(user), int(nice), int(sys), int(idle))
def update_graph(self):
nval = self._getvalues()
oval = self.oldvalues
busy = nval[0] + nval[1] + nval[2] - oval[0] - oval[1] - oval[2]
total = busy + nval[3] - oval[3]
# sometimes this value is zero for unknown reason (time shift?)
        # we just send the previous value, because it gives us no info about
# cpu load, if it's zero.
if total:
push_value = busy * 100.0 / total
self.push(push_value)
else:
self.push(self.values[0])
self.oldvalues = nval
def get_meminfo():
with open('/proc/meminfo') as file:
val = {}
for line in file:
key, tail = line.split(':')
uv = tail.split()
val[key] = int(uv[0])
return val
class MemoryGraph(_Graph):
"""
Displays a memory usage graph.
"""
orientations = base.ORIENTATION_HORIZONTAL
fixed_upper_bound = True
def __init__(self, **config):
_Graph.__init__(self, **config)
val = self._getvalues()
self.maxvalue = val['MemTotal']
mem = val['MemTotal'] - val['MemFree'] - val['Buffers'] - val['Cached']
self.fullfill(mem)
def _getvalues(self):
return get_meminfo()
def update_graph(self):
val = self._getvalues()
self.push(
val['MemTotal'] - val['MemFree'] - val['Buffers'] - val['Cached']
)
class SwapGraph(_Graph):
"""
Display a swap info graph.
"""
orientations = base.ORIENTATION_HORIZONTAL
fixed_upper_bound = True
def __init__(self, **config):
_Graph.__init__(self, **config)
val = self._getvalues()
self.maxvalue = val['SwapTotal']
swap = val['SwapTotal'] - val['SwapFree'] - val.get('SwapCached', 0)
self.fullfill(swap)
def _getvalues(self):
return get_meminfo()
def update_graph(self):
val = self._getvalues()
swap = val['SwapTotal'] - val['SwapFree'] - val.get('SwapCached', 0)
# can change, swapon/off
if self.maxvalue != val['SwapTotal']:
self.maxvalue = val['SwapTotal']
self.fullfill(swap)
self.push(swap)
class NetGraph(_Graph):
"""
Display a network usage graph.
"""
orientations = base.ORIENTATION_HORIZONTAL
defaults = [
(
"interface",
"auto",
"Interface to display info for ('auto' for detection)"
),
("bandwidth_type", "down", "down(load)/up(load)"),
]
def __init__(self, **config):
_Graph.__init__(self, **config)
self.add_defaults(NetGraph.defaults)
if self.interface == "auto":
try:
self.interface = self.get_main_iface()
except RuntimeError:
self.log.warning(
"NetGraph - Automatic interface detection failed, "
"falling back to 'eth0'"
)
self.interface = "eth0"
self.filename = '/sys/class/net/{interface}/statistics/{type}'.format(
interface=self.interface,
            type='rx_bytes' if self.bandwidth_type == 'down' else 'tx_bytes'
)
self.bytes = 0
self.bytes = self._getValues()
def _getValues(self):
try:
with open(self.filename) as file:
val = int(file.read())
rval = val - self.bytes
self.bytes = val
return rval
except IOError:
return 0
def update_graph(self):
val = self._getValues()
self.push(val)
@staticmethod
def get_main_iface():
make_route = lambda line: dict(zip(['iface', 'dest'], line.split()))
with open('/proc/net/route', 'r') as fp:
lines = fp.readlines()
routes = [make_route(line) for line in lines[1:]]
try:
return next(
(r for r in routes if not int(r['dest'], 16)),
routes[0]
)['iface']
except (KeyError, IndexError, ValueError):
raise RuntimeError('No valid interfaces available')
class HDDGraph(_Graph):
"""
Display HDD free or used space graph.
"""
fixed_upper_bound = True
orientations = base.ORIENTATION_HORIZONTAL
defaults = [
("path", "/", "Partition mount point."),
("space_type", "used", "free/used")
]
def __init__(self, **config):
_Graph.__init__(self, **config)
self.add_defaults(HDDGraph.defaults)
stats = statvfs(self.path)
self.maxvalue = stats.f_blocks * stats.f_frsize
values = self._getValues()
self.fullfill(values)
def _getValues(self):
stats = statvfs(self.path)
if self.space_type == 'used':
return (stats.f_blocks - stats.f_bfree) * stats.f_frsize
else:
return stats.f_bavail * stats.f_frsize
def update_graph(self):
val = self._getValues()
self.push(val)
class HDDBusyGraph(_Graph):
"""
Parses /sys/block/<dev>/stat file and extracts overall device
IO usage, based on ``io_ticks``'s value.
See https://www.kernel.org/doc/Documentation/block/stat.txt
"""
orientations = base.ORIENTATION_HORIZONTAL
defaults = [
("device", "sda", "Block device to display info for")
]
def __init__(self, **config):
_Graph.__init__(self, **config)
self.add_defaults(HDDBusyGraph.defaults)
self.path = '/sys/block/{dev}/stat'.format(
dev=self.device
)
self._prev = 0
def _getActivity(self):
try:
# io_ticks is field number 9
with open(self.path) as f:
io_ticks = int(f.read().split()[9])
except IOError:
return 0
activity = io_ticks - self._prev
self._prev = io_ticks
return activity
def update_graph(self):
self.push(self._getActivity())
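# Illustrative usage sketch (not part of the original module): these widgets are meant to
# be placed in a qtile bar from the user's config; the keyword arguments below all come
# from the ``defaults`` lists declared above.
def _example_graph_widgets():
    return [
        CPUGraph(graph_color='18BAEB', line_width=2),
        MemoryGraph(type='box'),
        NetGraph(interface='auto', bandwidth_type='down'),
        HDDBusyGraph(device='sda'),
    ]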
| {
"content_hash": "6dba76c88de3aee8c73d220921c2a59c",
"timestamp": "",
"source": "github",
"line_count": 394,
"max_line_length": 82,
"avg_line_length": 31.482233502538072,
"alnum_prop": 0.5485327313769752,
"repo_name": "farebord/qtile",
"id": "d9c5d78a0fc21103b6d36b0d6d8e896291f4a88e",
"size": "13958",
"binary": false,
"copies": "4",
"ref": "refs/heads/develop",
"path": "libqtile/widget/graph.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Groff",
"bytes": "3590"
},
{
"name": "Makefile",
"bytes": "1032"
},
{
"name": "Python",
"bytes": "909733"
},
{
"name": "Shell",
"bytes": "2833"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
from builtins import * # NOQA
from future import standard_library
standard_library.install_aliases() # NOQA
from logging import getLogger
import chainer
from chainer import functions as F
from chainerrl import distribution
from chainerrl.links.mlp import MLP
from chainerrl.policy import Policy
logger = getLogger(__name__)
class SoftmaxPolicy(chainer.Chain, Policy):
"""Softmax policy that uses Boltzmann distributions.
Args:
model (chainer.Link):
Link that is callable and outputs action values.
beta (float):
Parameter of Boltzmann distributions.
"""
def __init__(self, model, beta=1.0, min_prob=0.0):
self.beta = beta
self.min_prob = min_prob
super().__init__(model=model)
def __call__(self, x):
h = self.model(x)
return distribution.SoftmaxDistribution(
h, beta=self.beta, min_prob=self.min_prob)
class FCSoftmaxPolicy(SoftmaxPolicy):
"""Softmax policy that consists of FC layers and rectifiers"""
def __init__(self, n_input_channels, n_actions,
n_hidden_layers=0, n_hidden_channels=None,
beta=1.0, nonlinearity=F.relu,
last_wscale=1.0,
min_prob=0.0):
self.n_input_channels = n_input_channels
self.n_actions = n_actions
self.n_hidden_layers = n_hidden_layers
self.n_hidden_channels = n_hidden_channels
self.beta = beta
super().__init__(
model=MLP(n_input_channels,
n_actions,
(n_hidden_channels,) * n_hidden_layers,
nonlinearity=nonlinearity,
last_wscale=last_wscale),
beta=self.beta,
min_prob=min_prob)
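# Illustrative usage sketch (not part of the original module): a small fully connected
# softmax policy over 4-dimensional observations with 3 discrete actions; the sizes and
# the zero observation below are arbitrary placeholders.
def _example_fc_softmax_policy():
    import numpy as np
    policy = FCSoftmaxPolicy(n_input_channels=4, n_actions=3,
                             n_hidden_layers=2, n_hidden_channels=64)
    dist = policy(np.zeros((1, 4), dtype=np.float32))  # SoftmaxDistribution over actions
    return dist.sample()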
| {
"content_hash": "3ab96cd0103b5e5aa3659fa54394366f",
"timestamp": "",
"source": "github",
"line_count": 64,
"max_line_length": 66,
"avg_line_length": 30.21875,
"alnum_prop": 0.6137538779731128,
"repo_name": "toslunar/chainerrl",
"id": "bca38d61cb22aeed03658602ca5bf1db814b2e2d",
"size": "1934",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "chainerrl/policies/softmax_policy.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "837028"
},
{
"name": "Shell",
"bytes": "11208"
}
],
"symlink_target": ""
} |
"""Generate bibliography.rst
The input file is of the following form. Blank lines are ignored. New
lines are collapsed to a single space. Besides newlines, text for each
entry is copied without filtering, and thus reST syntax can be
used. Note that the key line must begin with '['.
[M55]
G. H. Mealy. `A Method for Synthesizing Sequential Circuits
<http://dx.doi.org/10.1002/j.1538-7305.1955.tb03788.x>`_. *Bell System
Technical Journal (BSTJ)*, Vol.34, No.5, pp. 1045 -- 1079, September,
1955.
"""
from __future__ import print_function
import sys
import io
def print_entry(out_f, bkey, entry_text):
nl = '\n'
idt = 4*' '
if bkey is not None:
bkey_canon = bkey.lower()
out_f.write(':raw-html:`<a href="#'+bkey_canon+'" id="'+bkey_canon+
'">['+bkey+']</a>`'+nl+idt+'\\'+entry_text+2*nl)
if len(sys.argv) < 2:
print('Usage: genbib.py FILE')
sys.exit(1)
out_f = io.open('bibliography.rst', 'w')
with io.open(sys.argv[1], 'r') as f:
bkey = None
entry_text = None
out_f.write(u'''Bibliography
============
.. role:: raw-html(raw)
:format: html
''')
for line in f:
line = line.strip()
if len(line) == 0:
continue
        if line.startswith('['):
print_entry(out_f, bkey, entry_text)
closing_sym = line.index(']')
bkey = line[1:closing_sym]
entry_text = u''
elif bkey is None:
            raise ValueError('Entry text found without preceding key.')
else:
entry_text += line+' '
print_entry(out_f, bkey, entry_text)
| {
"content_hash": "2c594507deccd47f325e7f0f85bf6538",
"timestamp": "",
"source": "github",
"line_count": 58,
"max_line_length": 75,
"avg_line_length": 27.517241379310345,
"alnum_prop": 0.5883458646616542,
"repo_name": "necozay/tulip-control",
"id": "c7924834f2797bf64217a23bd4ea2ba1009fb7d8",
"size": "1618",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "doc/genbib.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "34875"
},
{
"name": "Makefile",
"bytes": "3038"
},
{
"name": "Matlab",
"bytes": "43424"
},
{
"name": "Python",
"bytes": "881790"
},
{
"name": "Ruby",
"bytes": "2209"
},
{
"name": "Shell",
"bytes": "9533"
},
{
"name": "Tcl",
"bytes": "2913"
}
],
"symlink_target": ""
} |
import json
from airflow.models import BaseOperator
from airflow.utils.decorators import apply_defaults
from airflow.hooks.slack_hook import SlackHook
from airflow.exceptions import AirflowException
class SlackAPIOperator(BaseOperator):
"""
Base Slack Operator
The SlackAPIPostOperator is derived from this operator.
In the future additional Slack API Operators will be derived from this class as well
:param slack_conn_id: Slack connection ID which its password is Slack API token
:type slack_conn_id: string
:param token: Slack API token (https://api.slack.com/web)
:type token: string
:param method: The Slack API Method to Call (https://api.slack.com/methods)
:type method: string
:param api_params: API Method call parameters (https://api.slack.com/methods)
:type api_params: dict
"""
@apply_defaults
def __init__(self,
slack_conn_id=None,
token=None,
method=None,
api_params=None,
*args, **kwargs):
super(SlackAPIOperator, self).__init__(*args, **kwargs)
if token is None and slack_conn_id is None:
raise AirflowException('No valid Slack token nor slack_conn_id supplied.')
if token is not None and slack_conn_id is not None:
raise AirflowException('Cannot determine Slack credential when both token and slack_conn_id are supplied.')
self.token = token
self.slack_conn_id = slack_conn_id
self.method = method
self.api_params = api_params
def construct_api_call_params(self):
"""
        Used by the execute function. Allows templating on the source fields of the api_params dict before construction.
        Override in child classes.
        Each SlackAPIOperator child class is responsible for having a construct_api_call_params function
        which sets self.api_params to a dict of API call parameters (https://api.slack.com/methods)
"""
pass
def execute(self, **kwargs):
"""
        SlackAPIOperator calls will not fail even if the call is unsuccessful.
        It should not prevent a DAG from completing successfully.
"""
if not self.api_params:
self.construct_api_call_params()
slack = SlackHook(token=self.token, slack_conn_id=self.slack_conn_id)
slack.call(self.method, self.api_params)
class SlackAPIPostOperator(SlackAPIOperator):
"""
Posts messages to a slack channel
:param channel: channel in which to post message on slack name (#general) or ID (C12318391)
:type channel: string
:param username: Username that airflow will be posting to Slack as
:type username: string
:param text: message to send to slack
:type text: string
:param icon_url: url to icon used for this message
:type icon_url: string
:param attachments: extra formatting details - see https://api.slack.com/docs/attachments
:type attachments: array of hashes
"""
template_fields = ('username', 'text', 'attachments', 'channel')
ui_color = '#FFBA40'
@apply_defaults
def __init__(self,
channel='#general',
username='Airflow',
text='No message has been set.\n'
'Here is a cat video instead\n'
'https://www.youtube.com/watch?v=J---aiyznGQ',
icon_url='https://raw.githubusercontent.com/airbnb/airflow/master/airflow/www/static/pin_100.png',
attachments=None,
*args, **kwargs):
self.method = 'chat.postMessage'
self.channel = channel
self.username = username
self.text = text
self.icon_url = icon_url
self.attachments = attachments
super(SlackAPIPostOperator, self).__init__(method=self.method,
*args, **kwargs)
def construct_api_call_params(self):
self.api_params = {
'channel': self.channel,
'username': self.username,
'text': self.text,
'icon_url': self.icon_url,
'attachments': json.dumps(self.attachments),
}
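# Illustrative usage sketch (not part of the original module): creating a post task.
# The token and task_id are placeholders; exactly one of token or slack_conn_id must be
# given, as enforced in SlackAPIOperator.__init__ above.
def _example_post_task(dag=None):
    return SlackAPIPostOperator(
        task_id='slack_notify',
        token='xoxp-placeholder-token',
        channel='#general',
        username='Airflow',
        text='DAG finished successfully.',
        dag=dag,
    )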
| {
"content_hash": "0b981ba41ec90a86ae1178b0b92655a5",
"timestamp": "",
"source": "github",
"line_count": 112,
"max_line_length": 124,
"avg_line_length": 37.839285714285715,
"alnum_prop": 0.6285983954695611,
"repo_name": "yati-sagade/incubator-airflow",
"id": "8398a7aea4433560eb5db36b10851caab0f0da28",
"size": "4805",
"binary": false,
"copies": "7",
"ref": "refs/heads/master",
"path": "airflow/operators/slack_operator.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "57054"
},
{
"name": "HTML",
"bytes": "152530"
},
{
"name": "JavaScript",
"bytes": "1364571"
},
{
"name": "Mako",
"bytes": "1037"
},
{
"name": "Python",
"bytes": "2828163"
},
{
"name": "Shell",
"bytes": "34436"
}
],
"symlink_target": ""
} |
"""
WSGI config for a_z_challenge project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.6/howto/deployment/wsgi/
"""
import os
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "a_z_challenge.settings")
from django.core.wsgi import get_wsgi_application
application = get_wsgi_application()
| {
"content_hash": "fb23d5e6f3f78a3acd2a2557f631b53f",
"timestamp": "",
"source": "github",
"line_count": 14,
"max_line_length": 78,
"avg_line_length": 28.642857142857142,
"alnum_prop": 0.770573566084788,
"repo_name": "GyrosOfWar/a-z-challenge-py",
"id": "6c32dbbed8ee04a6ed51375bda8626a792e790ae",
"size": "401",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "a_z_challenge/wsgi.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "5839"
}
],
"symlink_target": ""
} |
import argparse
import os
import sys
import numpy as np
from scipy import ndimage
import gram
from gram import JoinMode
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="Synthesize image from texture", formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('--output-width', '-ow', default=512, type=int,
help="Pixel width of generated image")
parser.add_argument('--output-height', '-oh', type=int,
help="Pixel height of generated image. If not specified, equal to output-width.")
parser.add_argument('--octaves', '-o', type=int, default=4,
help="Number of octaves (where 1 means \"Consider only 1 scale\")")
parser.add_argument('--layers', '-l', type=int, nargs='+', default=[2, 7],
help="Which layers to match gram matrices on")
parser.add_argument('--max-iter', '-m', type=int, default=500,
help="Maximum iterations for the L-BFGS-B optimizer")
parser.add_argument("--output-prefix", "-op", default='out',
help="Prefix to append to output directory")
parser.add_argument("--save-every", "-n", default=10, type=int,
help="Save an in-progress optimization image every SAVE_EVERY iterations")
parser.add_argument("--source-scale", "-ss", type=float,
help="How much to scale the source image by")
parser.add_argument("--source-width", "-sw", type=int,
help="Scale source to this width. Mutually exclusive with source-scale")
parser.add_argument("--padding-mode", "-p", type=str, choices = ['valid', 'same'], default='valid',
help="What boundary condition to use for convolutions")
parser.add_argument("--join-mode", "-j", type=JoinMode,
choices = list(JoinMode),
default=JoinMode.AVERAGE,
help="How to combine gram matrices when multiple sources given")
parser.add_argument("--count", "-c", type=int, default=1,
help="How many images to generate simultaneously")
parser.add_argument("--mul", type=float, default=1.0, help="Multiply target grams by this amount")
parser.add_argument("--if-weight", type=float, default=1., help="Inter-frame loss weight")
parser.add_argument("--if-shift", type=float, default=5., help="How many pixel-shift should inter-frame loss approximate?")
parser.add_argument("--if-order", type=int, default=2, help="How many frames should we 'tie' together?")
parser.add_argument("--if-distance-type", type=str, choices = ['l2', 'lap1'], default="l2", help="How should we measure the distance between frames?")
parser.add_argument("--if-octaves", type=int, default=1, help="At how many scales should the distance function operate?")
parser.add_argument("--seed", type=str, choices = ['random', 'symmetric'], default='random', help="How to seed the optimization")
parser.add_argument("--data-dir", "-d", type=str, default="model_data", help="Where to find the VGG weight files")
parser.add_argument("--output-dir", type=str, default="outputs", help="Where to save the generated outputs")
parser.add_argument("--tol", type=float, default=1e-9, help="Gradient scale at which to terminate optimization")
parser.add_argument("--source", "-s", required=True, nargs='+',
help="List of file to use as source textures")
args = parser.parse_args()
# Any necessary validation here?
if args.if_octaves > args.octaves:
print("Error: if_octaves must be less than octaves, but %d > %d" % (args.if_octaves, args.octaves))
sys.exit(1)
output_size = (args.output_width, args.output_height if args.output_height is not None else args.output_width)
if not os.path.exists(args.output_dir):
os.mkdir(args.output_dir)
output_dir = "{}.L{}.o{}".format(args.output_prefix, ",".join(str(l) for l in args.layers), args.octaves)
output_dir = os.path.join(args.output_dir, output_dir)
if args.source_scale:
output_dir += ".w{:.2}".format(args.source_scale)
if args.source_width:
output_dir += ".w{}".format(args.source_width)
if args.count > 1:
output_dir += ".c{}.ifs{}".format(args.count, args.if_shift)
if args.mul != 1.0:
output_dir += ".m{}".format(args.mul)
if args.join_mode != JoinMode.AVERAGE:
output_dir += ".j{}".format(args.join_mode.value)
if args.if_octaves != 1:
output_dir += ".ifo%d" % args.if_octaves
output_dir += ".{}x{}".format(*output_size)
suffix = 0
base_output_dir = output_dir
while os.path.exists(output_dir):
output_dir = base_output_dir + ".{}".format(suffix)
suffix += 1
if suffix > 100:
print("Hmm, maybe in an infinite loop trying to create output directory")
sys.exit(1)
try:
os.mkdir(output_dir)
except OSError:
print("Hmm, failed to make output directory... race condition?")
sys.exit(1)
# Save the command for good measure
with open(os.path.join(output_dir, "Acommand.txt"), 'w') as f:
f.write(' '.join(sys.argv))
width = args.output_width
height = args.output_height or width
print("About to generate a {}x{} image, matching the Gram matrices for layers {} at {} distinct scales".format(width, height, args.layers, args.octaves))
pyramid_model = gram.make_pyramid_model(args.octaves, args.padding_mode)
pyramid_gram_model = gram.make_pyramid_gram_model(pyramid_model, args.layers, data_dir=args.data_dir)
target_grams = gram.get_gram_matrices_for_images(pyramid_gram_model, args.source,
source_width = args.source_width, source_scale = args.source_scale, join_mode = args.join_mode)
target_grams = [t*args.mul for t in target_grams]
#target_grams = [np.max(t) - t for t in target_grams]
x0 = np.random.randn(args.count, height, width, 3)
if args.seed == 'symmetric':
x0 = x0 + x0[:,::-1, :, :]
x0 = x0 + x0[:, :, ::-1, :]
blur_radius = 30
for i in range(3):
x0[...,i] = blur_radius*50*ndimage.gaussian_filter(x0[...,i], blur_radius)
x0 += np.random.randn(*(x0.shape)) * 2
else:
# Shift the whole thing to be near zero
x0 += 10 - gram.colour_offsets
#x0 = preprocess(load_img('../sources/smokeb768.jpg'))
interframe_distances = []
if args.count > 1:
for im in gram.get_images(args.source, source_scale = args.source_scale, source_width=args.source_width):
interframe_distances.append(gram.interframe_distance(pyramid_model, im,
shift=args.if_shift,
interframe_distance_type = args.if_distance_type,
interframe_octaves = args.if_octaves))
print("Raw interframe distances: ")
print(interframe_distances)
#target_distances = np.mean(interframe_distances, axis=1)
target_distances = interframe_distances[0]
print("Shifting the source images by {} gives a {} interframe distance of approx {}".format(args.if_shift, args.if_distance_type, target_distances))
else:
target_distances=None
gram.synthesize_animation(pyramid_model, pyramid_gram_model, target_grams,
width = width, height = height, frame_count=args.count,
x0 = x0,
interframe_loss_weight=args.if_weight,
interframe_order=args.if_order,
target_interframe_distances = target_distances,
interframe_distance_type = args.if_distance_type,
interframe_octaves = args.if_octaves,
output_directory = output_dir, max_iter=args.max_iter, save_every=args.save_every, tol=args.tol
)
print("DONE: ")
| {
"content_hash": "23690b36f439ff7f2a4b777e6d62a4a2",
"timestamp": "",
"source": "github",
"line_count": 159,
"max_line_length": 157,
"avg_line_length": 48.679245283018865,
"alnum_prop": 0.6391472868217054,
"repo_name": "wxs/subjective-functions",
"id": "9120aed6e217f2a190ce94b89ad124cca2bcea07",
"size": "7775",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "synthesize.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "28361"
}
],
"symlink_target": ""
} |
import numpy
import pylab
def signal_power_test(raw_validation_data_set, training_set, validation_set, pred_act, pred_val_act,display=False):
signal_power=[]
noise_power=[]
normalized_noise_power=[]
signal_power_variance=[]
for i in xrange(0,len(raw_validation_data_set)):
(sp,np,nnp,spv) = signal_and_noise_power(raw_validation_data_set[i])
signal_power.append(sp)
noise_power.append(np)
normalized_noise_power.append(nnp)
signal_power_variance.append(spv)
significant = numpy.mat(numpy.nonzero(((numpy.array(signal_power) - 0.5*numpy.array(signal_power_variance)) > 0.0)*1.0)).getA1()
print numpy.shape(training_set)
print numpy.shape(numpy.var(training_set,axis=0))
print numpy.shape(numpy.var(pred_act - training_set,axis=0))
print numpy.shape(signal_power)
training_prediction_power=numpy.divide(numpy.var(training_set,axis=0) - numpy.var(pred_act - training_set,axis=0), signal_power)
validation_prediction_power=numpy.divide(numpy.var(validation_set,axis=0) - numpy.var(pred_val_act - validation_set,axis=0), signal_power)
if display:
pylab.figure()
pylab.subplot(131)
pylab.title('distribution of estimated signal power in neurons')
pylab.errorbar(noise_power,signal_power,fmt='ro',yerr=signal_power_variance)
#pylab.errorbar(numpy.array(noise_power)[significant],numpy.array(signal_power)[significant],fmt='bo',yerr=numpy.array(signal_power_variance)[significant])
pylab.ylabel('signal power')
pylab.xlabel('noise power')
pylab.subplot(132)
pylab.title('distribution of estimated prediction power ')
pylab.plot(numpy.array(normalized_noise_power)[significant],numpy.array(training_prediction_power)[significant],'ro',label='training')
pylab.plot(numpy.array(normalized_noise_power)[significant],numpy.array(validation_prediction_power)[significant],'bo',label='validation')
pylab.axis([20.0,100.0,-2.0,2.0])
pylab.xlabel('normalized noise power')
pylab.ylabel('prediction power')
pylab.legend()
pylab.subplot(133)
pylab.title('relationship between test set prediction power \n and validation prediction power')
pylab.plot(validation_prediction_power[significant],training_prediction_power[significant],'ro')
pylab.axis([-2.0,2.0,0.0,2.0])
pylab.xlabel('validation set prediction power')
pylab.ylabel('test set prediction power')
return (signal_power,noise_power,normalized_noise_power,training_prediction_power,validation_prediction_power,signal_power_variance)
def signal_and_noise_power(responses):
(trials,n) = numpy.shape(responses)
sp = (1 / (trials-1.0)) * (trials * numpy.var(numpy.mean(responses,axis=0)) - numpy.mean(numpy.var(responses,axis=1)))
np = numpy.mean(numpy.var(responses,axis=1)) - sp
nnp = (numpy.mean(numpy.var(responses,axis=1)) - sp) / numpy.mean(numpy.var(responses,axis=1)) * 100
ni = numpy.mean(numpy.mat(responses),0)
nni = numpy.mean(ni)
noise = responses - numpy.tile(ni,(trials,1))
Cov = numpy.mat(numpy.mat(noise)).T * numpy.mat(numpy.mat(noise))
s = numpy.mean(Cov,0)
ss = numpy.mean(s)
spv = numpy.sum((4.0/trials) * ((ni*Cov*ni.T)/(n*n) - 2*nni*ni*s.T/n + nni*nni*ss) + 2.0/(trials*(trials-1))*( numpy.trace(Cov*Cov)/(n*n) - (2.0/n)*s*s.T + ss*ss))
return (sp,np,nnp,spv)
def estimateNoise(trials):
(num_neurons,num_trials,num_resp) = numpy.shape(trials)
mean_responses = numpy.mean(trials,1)
for i in xrange(0,10):
pylab.figure()
pylab.subplot(3,3,2)
pylab.hist(mean_responses[i,:])
bins = numpy.arange(numpy.min(mean_responses[i,:]),numpy.max(mean_responses[i,:]) + (numpy.max(mean_responses[i,:])+0.00001-numpy.min(mean_responses[i,:]))/5.0,( numpy.max(mean_responses[i,:])+0.00001-numpy.min(mean_responses[i,:]))/5.0)
print numpy.min(mean_responses[i,:])
print numpy.max(mean_responses[i,:])
print bins
#membership = numpy.zeros(numpy.shape(mean_responses[i,:]))
for j in xrange(0,5):
membership = numpy.nonzero(numpy.array(((mean_responses[i,:] >= bins[j]) & (mean_responses[i,:] < bins[j+1]))))
raw_responses = trials[i,:,membership].flatten()
pylab.subplot(3,3,3+j+1)
if(len(raw_responses) != 0):
pylab.hist(raw_responses)
pylab.xlabel(str(bins[j])+'-'+str(bins[j+1]))
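# Illustrative sketch (not part of the original module): run signal_and_noise_power on
# synthetic trial data in which every trial is the same underlying response plus Gaussian
# noise with an arbitrary standard deviation of 0.5.
if __name__ == '__main__':
    num_trials, num_stimuli = 10, 200
    true_signal = numpy.random.randn(num_stimuli)
    responses = numpy.tile(true_signal, (num_trials, 1)) \
        + 0.5 * numpy.random.randn(num_trials, num_stimuli)
    (sp, np, nnp, spv) = signal_and_noise_power(responses)
    print('signal power %f, noise power %f, normalized noise power %f%%, variance %f'
          % (sp, np, nnp, spv))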
| {
"content_hash": "ce2ba1c40efd1992d695001c3cbf0b4d",
"timestamp": "",
"source": "github",
"line_count": 91,
"max_line_length": 238,
"avg_line_length": 47.032967032967036,
"alnum_prop": 0.6976635514018692,
"repo_name": "ioam/svn-history",
"id": "c27fb6481b897b0629df4a76ac29c7aae8fe595d",
"size": "4280",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "contrib/JanA/noiseEstimation.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Elixir",
"bytes": "202"
},
{
"name": "Emacs Lisp",
"bytes": "21378"
},
{
"name": "JavaScript",
"bytes": "12602"
},
{
"name": "PHP",
"bytes": "596890"
},
{
"name": "Perl",
"bytes": "43403"
},
{
"name": "Python",
"bytes": "3334771"
},
{
"name": "Shell",
"bytes": "9260"
},
{
"name": "Tcl",
"bytes": "433956"
}
],
"symlink_target": ""
} |
class OtherEndpointsMixin:
"""
Various endpoints dealing with the Showroom service and menus
"""
def avatar_server_settings(self):
"""
Some info about where avatars are stored, e.g.:
{
"version": 41,
"path": "https://image.showroom-live.com/showroom-prod/image/avatar/",
"f_ext": "png"
}
:return:
"""
endpoint = "/api/avatar/server_settings"
result = self._api_get(endpoint)
return result
def radio_images(self):
"""
A list of background images for radio broadcasts
:return:
"""
endpoint = "/api/radio_images"
result = self._api_get(endpoint)
return result.get('radio_images')
def service_settings(self):
"""
Global(?) default settings for showroom streams
Includes avatar_server_settings in the avatar_url field
:return:
"""
endpoint = "/api/service_settings/"
result = self._api_get(endpoint)
return result
def side_navigation_menu(self):
"""
Gets contents of the side navigation menu, including language specific labels
Three fields, the main one of interest is menu_list
:return:
"""
endpoint = "/api/menu/side_navi"
result = self._api_get(endpoint)
return result
def broadcast_menu(self):
"""
No idea. Just returns {"menu_list":[]} for me. Maybe only returns something if you are streaming?
:return:
"""
endpoint = "/api/menu/broadcast"
result = self._api_get(endpoint)
return result
def talks(self):
"""
Get lists of talks
Three lists, of popular, live, and followed talks
Formatted for display, so included in the lists are headers and messages to the user
:return:
"""
endpoint = "/api/talk/talks"
result = self._api_get(endpoint)
return result.get('talks')
def time_tables(self, started_at=None, order=None):
"""
:param started_at: int
:param order: str
:return:
"""
# TODO: find out what valid values for order are. NEXT/PREV?
endpoint = "/api/time_table/time_tables"
result = self._api_get(endpoint, params=dict(started_at=started_at, order=order))
return result
| {
"content_hash": "00ce85d509684df13cfcb1e1bc6d50c5",
"timestamp": "",
"source": "github",
"line_count": 86,
"max_line_length": 105,
"avg_line_length": 29.186046511627907,
"alnum_prop": 0.5490039840637451,
"repo_name": "wlerin/showroom",
"id": "4f272dc9b96661ac5b6ab804eb3aef162addf092",
"size": "2512",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "showroom/api/endpoints/other.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "294752"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import
from .value import Value, URI
from ..exceptions import IllegalArgumentException
from ..util import strings
from ..vocabulary.xmlschema import XMLSchema
import datetime
from collections import defaultdict
class Literal(Value):
"""
Lightweight implementation of the Literal class. Think 'LiteralBase'.
Implementer note: If this is still too heavyweight, subclass but don't
call super.__init__. That's why Python is so cool!
"""
def __init__(self, label, datatype=None, language=None):
Value.__init__(self)
# Use the properties to set the real values
self.datatype = datatype
self.language = language
self.label = label
ISO_FORMAT_WITH_T = "%Y-%m-%dT%H:%M:%S"
def getDatatype(self):
"""The URI representing the datatype for this literal, if there is one"""
return self._datatype
def setDatatype(self, datatype):
"""Sets the datatype of the value"""
if isinstance(datatype, str):
if datatype[0] == '<':
datatype = datatype[1:-1]
datatype = XMLSchema.uristr2obj.get(datatype, None) or URI(datatype)
elif datatype is not None:
if not isinstance(datatype, URI):
datatype = URI(datatype)
elif datatype.uri is None:
datatype = None
self._datatype = datatype # pylint: disable-msg=W0201
datatype = property(getDatatype, setDatatype)
def getLanguage(self):
"""The language for this Literal"""
return self._language
def setLanguage(self, language):
"""Set the language for this Literal"""
self._language = language.lower() if language else None # pylint: disable-msg=W0201
language = property(getLanguage, setLanguage)
def getLabel(self):
"""The label/value for this Literal"""
return self._label
def setLabel(self, label):
"""Set the label for this Literal"""
self._label = label # pylint: disable-msg=W0201
def getValue(self):
"""The label/value"""
return self.label
label = property(getLabel, setLabel)
def __eq__(self, other):
if not isinstance(other, Literal):
return NotImplemented
return (self.label == other.label and
self.datatype == other.datatype and
self.language == other.language)
def __hash__(self):
return hash(self._label)
def intValue(self):
"""Convert to int"""
return int(self._label)
def longValue(self):
"""Convert to long"""
return long(self._label)
def floatValue(self):
"""Convert to float"""
return float(self._label)
def booleanValue(self):
"""Convert to bool"""
return bool(self._label)
def dateValue(self):
"""Convert to date"""
return datetime.datetime.strptime(self._label, "%Y-%m-%d").date()
def datetimeValue(self):
"""Convert to datetime"""
return datetime.datetime.strptime(self._label, Literal.ISO_FORMAT_WITH_T)
def timeValue(self):
"""Convert to time"""
## THIS IS GOING TO BREAK:
return datetime.time(self._label)
## Returns the {@link XMLGregorianCalendar} value of this literal. A calendar
## representation can be given for literals whose label conforms to the
## syntax of the following <a href="http://www.w3.org/TR/xmlschema-2/">XML
## Schema datatypes</a>: <tt>dateTime</tt>, <tt>time</tt>,
## <tt>date</tt>, <tt>gYearMonth</tt>, <tt>gMonthDay</tt>,
## <tt>gYear</tt>, <tt>gMonth</tt> or <tt>gDay</tt>.
def calendarValue(self):
"""calendarValue not useful for Python."""
raise NotImplementedError("calendarValue")
def toNTriples(self):
"""Return an ntriples syntax for this Literal"""
sb = []
sb.append('"')
sb.append(strings.escape_double_quotes(self.getLabel()))
sb.append('"')
if self.language:
sb.append('@')
sb.append(self.language)
if self.datatype:
sb.append("^^")
sb.append(self.datatype.toNTriples())
return ''.join(sb)
###############################################################################
## Automatic conversion from Literal to Python object
###############################################################################
def toPython(self):
"""
Return a Python object representation of this literal.
Slightly silly implementation because we implement a conversion table
and then don't use the conversion functions.
"""
return XSDToPython[getattr(self.datatype, "uri", None)](self)
XSDToPython = defaultdict(lambda: Literal.getValue, [
(XMLSchema.INT.uri, Literal.intValue),
(XMLSchema.FLOAT.uri, Literal.floatValue),
(XMLSchema.LONG.uri, Literal.longValue),
(XMLSchema.BOOLEAN.uri, Literal.booleanValue),
(XMLSchema.DATETIME.uri, Literal.datetimeValue),
(XMLSchema.DATE.uri, Literal.dateValue),
(XMLSchema.TIME.uri, Literal.timeValue)])
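# Illustrative sketch (not part of the original module): a typed literal round-trips
# through the XSDToPython table and N-Triples serialization.
def _example_typed_literal():
    age = Literal("42", datatype=XMLSchema.INT)
    assert age.toPython() == 42   # dispatched to Literal.intValue via XSDToPython
    return age.toNTriples()       # e.g. '"42"^^<http://www.w3.org/2001/XMLSchema#int>'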
###############################################################################
# Extension to Sesame API
###############################################################################
class CompoundLiteral(Literal):
"""
A compound literal represents a range, a geospatial coordinate,
or other useful compound structure.
    TODO: FIGURE OUT SYNTAX FOR OTHER TYPES. ENSURE THAT
    THE SYNTAX FOR A RANGE DOESN'T CONFLICT/OVERLAP
"""
RANGE_LITERAL = 'rangeLiteral'
def __init__(self, choice, lowerBound=None, upperBound=None):
self.choice = choice
if choice == CompoundLiteral.RANGE_LITERAL:
self.lowerBound = lowerBound # should be a LiteralImpl
self.upperBound = upperBound # should be a LiteralImpl
## other compound types go here.
else:
raise IllegalArgumentException("Can't interpret the choice '%s' of a compound literal." % choice)
def isRangeLiteral(self):
return self.choice == CompoundLiteral.RANGE_LITERAL
def getLowerBound(self):
return self.lowerBound
def getUpperBound(self):
return self.upperBound
class RangeLiteral(CompoundLiteral):
"""
A range represents an interval between to scalar values.
"""
def __init__(self, lowerBound=None, upperBound=None):
self.lowerBound = lowerBound # should be a LiteralImpl
self.upperBound = upperBound # should be a LiteralImpl
def getLowerBound(self):
return self.lowerBound
def getUpperBound(self):
return self.upperBound
class GeoCoordinate(CompoundLiteral):
"""
    Define either a cartesian coordinate or a spherical coordinate. For the
    latter, the unit can be 'km', 'mile', 'radian', or 'degree'.
"""
def __init__(self, x, y, unit=None, geoType=None):
self.xcoor = x
self.ycoor = y
self.unit = unit
self.geoType = geoType
def __str__(self): return "|COOR|(%i, %i)" % (self.xcoor, self.ycoor)
class GeoSpatialRegion(CompoundLiteral):
pass
class GeoBox(GeoSpatialRegion):
def __init__(self, xMin, xMax, yMin, yMax, unit=None, geoType=None):
self.xMin = xMin
self.xMax = xMax
self.yMin = yMin
self.yMax = yMax
self.unit = unit
self.geoType = geoType
def __str__(self): return "|Box|%s,%s %s,%s" % (self.xMin, self.xMax, self.yMin, self.yMax)
class GeoCircle(GeoSpatialRegion):
def __init__(self, x, y, radius, unit=None, geoType=None):
self.x = x
self.y = y
self.radius = radius
self.unit = unit
self.geoType=geoType
def __str__(self): return "|Circle|%i,%i, radius=%i" % (self.x, self.y, self.radius)
class GeoPolygon(GeoSpatialRegion):
def __init__(self, vertices, uri=None, geoType=None):
self.vertices = vertices
self.geoType = geoType
self.uri = uri
self.resource = None
self.miniPolygon = None
def getVertices(self): return self.vertices
def getResource(self): return self.resource
def __str__(self): return "|Polygon|%s" % self.vertices
| {
"content_hash": "906704ecde0d42621684300fb6ec7447",
"timestamp": "",
"source": "github",
"line_count": 254,
"max_line_length": 109,
"avg_line_length": 33.51968503937008,
"alnum_prop": 0.5863284002818887,
"repo_name": "mpetyx/pychatbot",
"id": "05b09d05ebf6e7e08f057f027fd9893f4f8f4fed",
"size": "9326",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "SemanticWebApproach/RoboWriter/allegrordf-1.0.1/franz/openrdf/model/literal.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "99757"
},
{
"name": "C++",
"bytes": "1736027"
},
{
"name": "CSS",
"bytes": "287248"
},
{
"name": "D",
"bytes": "5487330"
},
{
"name": "Java",
"bytes": "4140"
},
{
"name": "JavaScript",
"bytes": "8460"
},
{
"name": "Objective-C",
"bytes": "39"
},
{
"name": "PHP",
"bytes": "4179"
},
{
"name": "Perl",
"bytes": "40530"
},
{
"name": "Python",
"bytes": "943590"
},
{
"name": "Shell",
"bytes": "175258"
},
{
"name": "TeX",
"bytes": "234627"
},
{
"name": "XSLT",
"bytes": "4027675"
}
],
"symlink_target": ""
} |
"""Unit tests for the pipeline options module."""
import logging
import unittest
import hamcrest as hc
from apache_beam.options.pipeline_options import PipelineOptions
from apache_beam.options.value_provider import RuntimeValueProvider
from apache_beam.options.value_provider import StaticValueProvider
from apache_beam.transforms.display import DisplayData
from apache_beam.transforms.display_test import DisplayDataItemMatcher
class PipelineOptionsTest(unittest.TestCase):
def tearDown(self):
# Clean up the global variable used by RuntimeValueProvider
RuntimeValueProvider.runtime_options = None
TEST_CASES = [
{'flags': ['--num_workers', '5'],
'expected': {'num_workers': 5, 'mock_flag': False, 'mock_option': None},
'display_data': [DisplayDataItemMatcher('num_workers', 5)]},
{
'flags': [
'--profile_cpu', '--profile_location', 'gs://bucket/', 'ignored'],
'expected': {
'profile_cpu': True, 'profile_location': 'gs://bucket/',
'mock_flag': False, 'mock_option': None},
'display_data': [
DisplayDataItemMatcher('profile_cpu',
True),
DisplayDataItemMatcher('profile_location',
'gs://bucket/')]
},
{'flags': ['--num_workers', '5', '--mock_flag'],
'expected': {'num_workers': 5, 'mock_flag': True, 'mock_option': None},
'display_data': [
DisplayDataItemMatcher('num_workers', 5),
DisplayDataItemMatcher('mock_flag', True)]
},
{'flags': ['--mock_option', 'abc'],
'expected': {'mock_flag': False, 'mock_option': 'abc'},
'display_data': [
DisplayDataItemMatcher('mock_option', 'abc')]
},
{'flags': ['--mock_option', ' abc def '],
'expected': {'mock_flag': False, 'mock_option': ' abc def '},
'display_data': [
DisplayDataItemMatcher('mock_option', ' abc def ')]
},
{'flags': ['--mock_option= abc xyz '],
'expected': {'mock_flag': False, 'mock_option': ' abc xyz '},
'display_data': [
DisplayDataItemMatcher('mock_option', ' abc xyz ')]
},
{'flags': ['--mock_option=gs://my bucket/my folder/my file'],
'expected': {'mock_flag': False,
'mock_option': 'gs://my bucket/my folder/my file'},
'display_data': [
DisplayDataItemMatcher(
'mock_option', 'gs://my bucket/my folder/my file')]
},
]
# Used for testing newly added flags.
class MockOptions(PipelineOptions):
@classmethod
def _add_argparse_args(cls, parser):
parser.add_argument('--mock_flag', action='store_true', help='mock flag')
parser.add_argument('--mock_option', help='mock option')
parser.add_argument('--option with space', help='mock option with space')
def test_display_data(self):
for case in PipelineOptionsTest.TEST_CASES:
options = PipelineOptions(flags=case['flags'])
dd = DisplayData.create_from(options)
hc.assert_that(dd.items, hc.contains_inanyorder(*case['display_data']))
def test_get_all_options(self):
for case in PipelineOptionsTest.TEST_CASES:
options = PipelineOptions(flags=case['flags'])
self.assertDictContainsSubset(case['expected'], options.get_all_options())
self.assertEqual(options.view_as(
PipelineOptionsTest.MockOptions).mock_flag,
case['expected']['mock_flag'])
self.assertEqual(options.view_as(
PipelineOptionsTest.MockOptions).mock_option,
case['expected']['mock_option'])
def test_from_dictionary(self):
for case in PipelineOptionsTest.TEST_CASES:
options = PipelineOptions(flags=case['flags'])
all_options_dict = options.get_all_options()
options_from_dict = PipelineOptions.from_dictionary(all_options_dict)
self.assertEqual(options_from_dict.view_as(
PipelineOptionsTest.MockOptions).mock_flag,
case['expected']['mock_flag'])
self.assertEqual(options.view_as(
PipelineOptionsTest.MockOptions).mock_option,
case['expected']['mock_option'])
def test_option_with_space(self):
options = PipelineOptions(flags=['--option with space= value with space'])
self.assertEqual(
getattr(options.view_as(PipelineOptionsTest.MockOptions),
'option with space'), ' value with space')
options_from_dict = PipelineOptions.from_dictionary(
options.get_all_options())
self.assertEqual(
getattr(options_from_dict.view_as(PipelineOptionsTest.MockOptions),
'option with space'), ' value with space')
def test_override_options(self):
base_flags = ['--num_workers', '5']
options = PipelineOptions(base_flags)
self.assertEqual(options.get_all_options()['num_workers'], 5)
self.assertEqual(options.get_all_options()['mock_flag'], False)
options.view_as(PipelineOptionsTest.MockOptions).mock_flag = True
self.assertEqual(options.get_all_options()['num_workers'], 5)
self.assertTrue(options.get_all_options()['mock_flag'])
def test_experiments(self):
options = PipelineOptions(['--experiment', 'abc', '--experiment', 'def'])
self.assertEqual(
sorted(options.get_all_options()['experiments']), ['abc', 'def'])
options = PipelineOptions(['--experiments', 'abc', '--experiments', 'def'])
self.assertEqual(
sorted(options.get_all_options()['experiments']), ['abc', 'def'])
options = PipelineOptions(flags=[''])
self.assertEqual(options.get_all_options()['experiments'], None)
def test_extra_package(self):
options = PipelineOptions(['--extra_package', 'abc',
'--extra_packages', 'def',
'--extra_packages', 'ghi'])
self.assertEqual(
sorted(options.get_all_options()['extra_packages']),
['abc', 'def', 'ghi'])
options = PipelineOptions(flags=[''])
self.assertEqual(options.get_all_options()['extra_packages'], None)
def test_dataflow_job_file(self):
options = PipelineOptions(['--dataflow_job_file', 'abc'])
self.assertEqual(options.get_all_options()['dataflow_job_file'], 'abc')
options = PipelineOptions(flags=[''])
self.assertEqual(options.get_all_options()['dataflow_job_file'], None)
def test_template_location(self):
options = PipelineOptions(['--template_location', 'abc'])
self.assertEqual(options.get_all_options()['template_location'], 'abc')
options = PipelineOptions(flags=[''])
self.assertEqual(options.get_all_options()['template_location'], None)
def test_redefine_options(self):
class TestRedefinedOptios(PipelineOptions): # pylint: disable=unused-variable
@classmethod
def _add_argparse_args(cls, parser):
parser.add_argument('--redefined_flag', action='store_true')
class TestRedefinedOptios(PipelineOptions):
@classmethod
def _add_argparse_args(cls, parser):
parser.add_argument('--redefined_flag', action='store_true')
options = PipelineOptions(['--redefined_flag'])
self.assertTrue(options.get_all_options()['redefined_flag'])
# TODO(BEAM-1319): Require unique names only within a test.
# For now, <file name acronym>_vp_arg<number> will be the convention
# to name value-provider arguments in tests, as opposed to
# <file name acronym>_non_vp_arg<number> for non-value-provider arguments.
# The number will grow per file as tests are added.
def test_value_provider_options(self):
class UserOptions(PipelineOptions):
@classmethod
def _add_argparse_args(cls, parser):
parser.add_value_provider_argument(
'--pot_vp_arg1',
help='This flag is a value provider')
parser.add_value_provider_argument(
'--pot_vp_arg2',
default=1,
type=int)
parser.add_argument(
'--pot_non_vp_arg1',
default=1,
type=int
)
    # Provide values: if not provided, the option becomes a runtime value provider.
options = UserOptions(['--pot_vp_arg1', 'hello'])
self.assertIsInstance(options.pot_vp_arg1, StaticValueProvider)
self.assertIsInstance(options.pot_vp_arg2, RuntimeValueProvider)
self.assertIsInstance(options.pot_non_vp_arg1, int)
# Values can be overwritten
options = UserOptions(pot_vp_arg1=5,
pot_vp_arg2=StaticValueProvider(value_type=str,
value='bye'),
pot_non_vp_arg1=RuntimeValueProvider(
option_name='foo',
value_type=int,
default_value=10))
self.assertEqual(options.pot_vp_arg1, 5)
self.assertTrue(options.pot_vp_arg2.is_accessible(),
'%s is not accessible' % options.pot_vp_arg2)
self.assertEqual(options.pot_vp_arg2.get(), 'bye')
self.assertFalse(options.pot_non_vp_arg1.is_accessible())
with self.assertRaises(RuntimeError):
options.pot_non_vp_arg1.get()
if __name__ == '__main__':
logging.getLogger().setLevel(logging.INFO)
unittest.main()
| {
"content_hash": "e3ddaa9d63250d28fc2a0fff62910bb8",
"timestamp": "",
"source": "github",
"line_count": 229,
"max_line_length": 82,
"avg_line_length": 40.68122270742358,
"alnum_prop": 0.6215113782739373,
"repo_name": "wangyum/beam",
"id": "66c69bdfc70ea09bb9c6b19d42c6effd571b50ec",
"size": "10101",
"binary": false,
"copies": "7",
"ref": "refs/heads/master",
"path": "sdks/python/apache_beam/options/pipeline_options_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Go",
"bytes": "109377"
},
{
"name": "Groovy",
"bytes": "84452"
},
{
"name": "Java",
"bytes": "14117162"
},
{
"name": "Python",
"bytes": "3165393"
},
{
"name": "Shell",
"bytes": "55385"
}
],
"symlink_target": ""
} |
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'Talk.auditorium'
db.add_column(u'core_talk', 'auditorium',
self.gf('django.db.models.fields.CharField')(default='Alberto Barajas Celis', max_length=140),
keep_default=False)
def backwards(self, orm):
# Deleting field 'Talk.auditorium'
db.delete_column(u'core_talk', 'auditorium')
models = {
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Group']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Permission']"}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'core.event': {
'Meta': {'object_name': 'Event'},
'abstract': ('django.db.models.fields.CharField', [], {'max_length': '140', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image': ('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'slug': ('autoslug.fields.AutoSlugField', [], {'unique_with': '()', 'max_length': '50', 'populate_from': "'title'"}),
'speaker': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['core.Speaker']"}),
'title': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '50'})
},
u'core.speaker': {
'Meta': {'object_name': 'Speaker'},
'contact': ('django.db.models.fields.CharField', [], {'max_length': '225', 'blank': 'True'}),
'education': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'institution': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'}),
'ocupation': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'}),
'user': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['auth.User']", 'unique': 'True'})
},
u'core.talk': {
'Meta': {'object_name': 'Talk', '_ormbases': [u'core.Event']},
'auditorium': ('django.db.models.fields.CharField', [], {'max_length': '140'}),
u'event_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['core.Event']", 'unique': 'True', 'primary_key': 'True'}),
'length': ('django.db.models.fields.PositiveIntegerField', [], {}),
'schedule': ('django.db.models.fields.DateTimeField', [], {})
},
u'core.tutorial': {
'Meta': {'object_name': 'Tutorial', '_ormbases': [u'core.Event']},
u'event_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['core.Event']", 'unique': 'True', 'primary_key': 'True'}),
'is_open': ('django.db.models.fields.BooleanField', [], {'default': 'False'})
},
u'core.tutorialenrollment': {
'Meta': {'unique_together': "(('mail', 'tutorial'),)", 'object_name': 'TutorialEnrollment'},
'code': ('django.db.models.fields.CharField', [], {'default': "'avufoq'", 'max_length': '6'}),
'confirmed': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'enrolled_on': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'mail': ('django.db.models.fields.EmailField', [], {'max_length': '75'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '40'}),
'tutorial': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['core.Tutorial']"})
},
u'core.tutorialsession': {
'Meta': {'object_name': 'TutorialSession'},
'classroom': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'length': ('django.db.models.fields.PositiveIntegerField', [], {}),
'schedule': ('django.db.models.fields.DateTimeField', [], {}),
'tutorial': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['core.Tutorial']"})
}
}
complete_apps = ['core'] | {
"content_hash": "6ba023cbd46aa0388bc78f7b2a8a3fe2",
"timestamp": "",
"source": "github",
"line_count": 109,
"max_line_length": 195,
"avg_line_length": 69.78899082568807,
"alnum_prop": 0.5490995136058893,
"repo_name": "eseca/congresoECC",
"id": "5c6c577da96a7233784c87d75a8701792d5ff2d2",
"size": "7631",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "congresoECC/apps/core/migrations/0002_auto__add_field_talk_auditorium.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "CSS",
"bytes": "22361"
},
{
"name": "JavaScript",
"bytes": "1946"
},
{
"name": "Python",
"bytes": "88872"
}
],
"symlink_target": ""
} |
"""
Ferris event class
"""
import logging
import bisect
class Event(object):
"""
Provides a simple interface for slot/signal event system.
Example:
myevent = Event()
myevent += some_handler_function
myevent()
"""
def __init__(self, name=None):
self.handlers = []
self.name = name
def handle(self, handler, priority=0):
"""
Add a handler function to this event. You can also use +=
"""
bisect.insort(self.handlers, (priority, handler))
return self
def unhandle(self, handler, priority=0):
"""
Remove a handler function from this event. If it's not in the
list, it'll raise a ValueError.
"""
try:
self.handlers.remove((priority, handler))
except:
raise ValueError("Handler is not handling this event, so cannot unhandle it.")
return self
def fire(self, *args, **kargs):
"""
Trigger all of the event handlers for this event. Arguments
are passed through. You can also use self().
"""
#logging.debug('Event %s firing %s listeners' % (self.name, self.handlers))
results = []
for p, handler in self.handlers:
results.append(handler(*args, **kargs))
return results
def getHandlerCount(self):
return len(self.handlers)
__iadd__ = handle
__isub__ = unhandle
__call__ = fire
__len__ = getHandlerCount
class NamedEvents(object):
"""
A simple container of events.
Example:
events = NamedEvents()
events.myevent += somefunction()
events.myevent.fire()
"""
def __init__(self):
self._events = {}
def getEvent(self, name):
if hasattr(self, name):
return getattr(self, name)
return self.getEventNoAttr(name)
def getEventNoAttr(self, name):
if not name in self._events:
self._events[name] = Event(name=name)
return self._events[name]
def setEventNoAttr(self, name, value):
if not isinstance(value, Event):
object.__setattr__(self, name, value)
self._events[name] = value
def setEvent(self, name, value):
if hasattr(self, name):
setattr(self, name, value)
self.setEventNoAttr(name, value)
def clear(self):
self._events.clear()
__getitem__ = getEvent
__setitem__ = setEvent
__getattr__ = getEventNoAttr
__setattr__ = setEventNoAttr
| {
"content_hash": "ee1a7dd9dc55c40996866516c524b507",
"timestamp": "",
"source": "github",
"line_count": 98,
"max_line_length": 90,
"avg_line_length": 25.857142857142858,
"alnum_prop": 0.5749802683504341,
"repo_name": "yowmamasita/social-listener-exam",
"id": "756ac3faa8a4df13c3445ff5a78e0d41e8274afe",
"size": "2534",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "ferris/core/event.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "JavaScript",
"bytes": "2510"
},
{
"name": "Python",
"bytes": "197882"
},
{
"name": "Shell",
"bytes": "2268"
}
],
"symlink_target": ""
} |
from django.shortcuts import render
from django.contrib.auth.decorators import login_required
from django.contrib.auth import login, authenticate
from django.shortcuts import render, redirect
from app.core.models import *
from app.core.forms import *
def get_current_user(request):
user = request.user
user.refresh_from_db()
return user
def home(request):
if (request.user.is_authenticated()):
return redirect('private')
return render(request, 'home.html')
def inicio(request):
return render(request, 'inicio.html')
def registrate(request):
return render(request, 'registrate.html')
def post_list(request):
return render(request, 'post_list.html')
@login_required
def private(request):
user = request.user
user.refresh_from_db()
return render(request, 'private.html', {'user': user})
def registro_desocupado(request):
    # A request can reach this view (called from a URL) in two different ways:
    # as a GET request (the URL was typed into the browser's address bar or a
    # link to it was followed) or as a POST request (a form was submitted to
    # that address). Both alternatives therefore have to be handled.
if request.method == "GET":
        # It is a GET, so we only need to render the page. Another function
        # takes care of that.
return get_registro_desocupado_form(request)
elif request.method == 'POST':
        # It is a POST, so the form has to be processed. Another function
        # takes care of that.
return handle_registro_desocupado_form(request)
def get_registro_desocupado_form(request):
form = RegistroDesocupado()
return render(request, 'signup.html', {'form': form})
def handle_registro_desocupado_form(request):
form = RegistroDesocupado(request.POST)
    # When a form is created from the request, it already holds the data the
    # user entered. Since the Django form is bound to the model, calling
    # form.save() creates the record in the database.
if form.is_valid():
        # First check that the form is valid, i.e. that the submitted data is
        # correct; otherwise an error must be shown.
form.save()
        # If the registration succeeded, send the user to the login page
return redirect('login')
else:
        # Stay on the same page and show the errors
return render(request, 'signup.html', {'form': form})
def registro_empresa(request):
if request.method == "GET":
return get_registro_empresa_form(request)
elif request.method == 'POST':
return handle_registro_empresa_form(request)
def get_registro_empresa_form(request):
form = RegistroEmpresa()
return render(request, 'signup.html', {'form': form})
def handle_registro_empresa_form(request):
form = RegistroEmpresa(request.POST)
if form.is_valid():
form.save()
return redirect('login')
else:
return render(request, 'signup.html', {'form': form})
@login_required
def edit_user(request):
if request.method == "GET":
return get_edit_user(request, request.user.id)
elif request.method == "POST":
return handler_edit_user(request, request.user.id)
def get_edit_user(request, pk):
user = request.user
user.refresh_from_db()
if request.user.is_desocupado():
form = EditarDesocupado(instance= User.objects.get(id=request.user.id).desocupado)
else:
form = EditarEmpresa(instance= User.objects.get(id=request.user.id).empresa)
return render(request, 'edit_user.html', {'form': form})
def handler_edit_user(request, pk):
user = request.user
user.refresh_from_db()
if request.user.is_desocupado():
form = EditarDesocupado(request.POST, instance= User.objects.get(id=request.user.id).desocupado)
else:
form = EditarEmpresa(request.POST, instance= User.objects.get(id=request.user.id).empresa)
if form.is_valid():
form.save()
return redirect('edit_user')
else:
return render(request, 'edit_user.html', {'form': form})
@login_required
def user_delete(request):
User.objects.get(id=request.user.id).delete()
return redirect('logout')
def registrarOfertaDeTrabajo(request):
if request.method == "GET":
return get_registrarOfertaDeTrabajo_form(request)
elif request.method == 'POST':
return handle_registrarOfertaDeTrabajo_form(request)
def get_registrarOfertaDeTrabajo_form(request):
form = RegistrarOfertaDeTrabajo()
return render(request, 'oferta de trabajo.html', {'form': form})
def handle_registrarOfertaDeTrabajo_form(request):
form = RegistrarOfertaDeTrabajo(request.POST)
if form.is_valid():
form.save()
return redirect('lista_ofertas')
else:
return render(request, 'oferta de trabajo.html', {'form': form})
@login_required
def edit_oferta(request, pk):
if request.method == "GET":
return get_edit_oferta(request, pk)
elif request.method == 'POST':
return handler_edit_oferta(request, pk)
def get_edit_oferta(request, pk):
form = EditarOferta(instance=OfertaDeTrabajo.objects.get(id=pk))
return render(request, 'edit_oferta.html', {'form': form, 'user': get_current_user(request)})
def handler_edit_oferta(request, pk):
form = EditarOferta(request.POST, instance=OfertaDeTrabajo.objects.get(id=pk))
if form.is_valid():
form.save()
return redirect('lista_ofertas')
else:
return render(request, 'edit_oferta.html', {'form': form, 'user': get_current_user(request)})
@login_required
def oferta_delete(request, pk):
OfertaDeTrabajo.objects.get(id=pk).delete()
return redirect('lista_ofertas')
@login_required
def lista_ofertas(request):
ofertasvar = OfertaDeTrabajo.objects.all()
return render(request, 'lista_ofertas.html', {'ofertas': ofertasvar})
| {
"content_hash": "ecc170f6597efd632555bcc3aba298d5",
"timestamp": "",
"source": "github",
"line_count": 169,
"max_line_length": 104,
"avg_line_length": 35.467455621301774,
"alnum_prop": 0.6885218551885218,
"repo_name": "NatashaKrassevich/colocaciones",
"id": "3223394b4bbe53262d63404caa472d46edb40cb0",
"size": "6010",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "app/core/views.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "23815"
},
{
"name": "HTML",
"bytes": "12136"
},
{
"name": "JavaScript",
"bytes": "40669"
},
{
"name": "PHP",
"bytes": "1218"
},
{
"name": "Python",
"bytes": "27049"
}
],
"symlink_target": ""
} |
import numpy
import theano.tensor as T
from pylearn2.models.model import Model
from pylearn2.space import VectorSpace
from pylearn2.utils import sharedX
from pylearn2.costs.cost import Cost, DefaultDataSpecsMixin
class CNNCost(DefaultDataSpecsMixin, Cost):
supervised = True
def expr(self, model, data, **kwargs):
space, source = self.get_data_specs(model)
space.validate(data)
inputs, targets = data
outputs = model.cnn_output(inputs)
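        # Per-example categorical cross-entropy between one-hot targets and softmax outputs; the cost is its mean.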
loss = -(targets * T.log(outputs)).sum(axis=1)
return loss.mean()
class CNN(Model):
"""
W1: [nvis * nhid]
b: [nhid]
    W2: [nhid * nclasses]
    c: [nclasses]
"""
def __init__(self, nvis, nhid, nclasses):
super(CNN, self).__init__()
self.nvis = nvis
self.nhid = nhid
self.nclasses = nclasses
W1_value = numpy.random.uniform(size=(self.nvis, self.nhid))
b_value = numpy.random.uniform(size=(self.nhid))
W2_value = numpy.random.uniform(size=(self.nhid, nclasses))
c_value = numpy.random.uniform(size=(nclasses))
self.W1 = sharedX(W1_value, 'W1')
self.W2 = sharedX(W2_value, 'W2')
self.b = sharedX(b_value, 'b')
self.c = sharedX(c_value, 'c')
self._params = [self.W1, self.W2, self.b, self.c]
self.input_space = VectorSpace(dim=self.nvis)
self.output_space = VectorSpace(dim=self.nclasses)
def cnn_output(self, X):
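        # One tanh hidden layer, then a tanh-squashed output layer normalized with softmax over the classes.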
h = T.tanh(T.dot(X, self.W1) + self.b)
o = T.tanh(T.dot(h, self.W2) + self.c)
return T.nnet.softmax(o)
| {
"content_hash": "8767385ff98da2057ca52a0b7318ab6e",
"timestamp": "",
"source": "github",
"line_count": 53,
"max_line_length": 62,
"avg_line_length": 26.471698113207548,
"alnum_prop": 0.6792587312900926,
"repo_name": "SolessChong/kaggle-mnist",
"id": "cc2c54fe814a723579488471ffcf9ee671acae6e",
"size": "1403",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "CNN/cnn.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "5149"
}
],
"symlink_target": ""
} |
import copy
import sys
import os
import operator
import shlex
import warnings
import heapq
import bisect
import random
from subprocess import Popen, PIPE
from threading import Thread
from collections import defaultdict
from itertools import chain
from functools import reduce
from math import sqrt, log, isinf, isnan, pow, ceil
from pyspark.java_gateway import local_connect_and_auth
from pyspark.serializers import (
AutoBatchedSerializer,
BatchedSerializer,
NoOpSerializer,
CartesianDeserializer,
CloudPickleSerializer,
PairDeserializer,
CPickleSerializer,
pack_long,
read_int,
write_int,
)
from pyspark.join import (
python_join,
python_left_outer_join,
python_right_outer_join,
python_full_outer_join,
python_cogroup,
)
from pyspark.statcounter import StatCounter
from pyspark.rddsampler import RDDSampler, RDDRangeSampler, RDDStratifiedSampler
from pyspark.storagelevel import StorageLevel
from pyspark.resource.requests import ExecutorResourceRequests, TaskResourceRequests
from pyspark.resource.profile import ResourceProfile
from pyspark.resultiterable import ResultIterable
from pyspark.shuffle import (
Aggregator,
ExternalMerger,
get_used_memory,
ExternalSorter,
ExternalGroupBy,
)
from pyspark.traceback_utils import SCCallSiteSync
from pyspark.util import fail_on_stopiteration, _parse_memory
__all__ = ["RDD"]
class PythonEvalType:
"""
Evaluation type of python rdd.
These values are internal to PySpark.
These values should match values in org.apache.spark.api.python.PythonEvalType.
"""
NON_UDF = 0
SQL_BATCHED_UDF = 100
SQL_SCALAR_PANDAS_UDF = 200
SQL_GROUPED_MAP_PANDAS_UDF = 201
SQL_GROUPED_AGG_PANDAS_UDF = 202
SQL_WINDOW_AGG_PANDAS_UDF = 203
SQL_SCALAR_PANDAS_ITER_UDF = 204
SQL_MAP_PANDAS_ITER_UDF = 205
SQL_COGROUPED_MAP_PANDAS_UDF = 206
SQL_MAP_ARROW_ITER_UDF = 207
def portable_hash(x):
"""
This function returns consistent hash code for builtin types, especially
for None and tuple with None.
The algorithm is similar to that one used by CPython 2.7
Examples
--------
>>> portable_hash(None)
0
>>> portable_hash((None, 1)) & 0xffffffff
219750521
"""
if "PYTHONHASHSEED" not in os.environ:
raise RuntimeError("Randomness of hash of string should be disabled via PYTHONHASHSEED")
if x is None:
return 0
if isinstance(x, tuple):
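        # Combine element hashes using the multiplier and seed constants of CPython 2.7's tuple hash, masked to the platform word size.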
h = 0x345678
for i in x:
h ^= portable_hash(i)
h *= 1000003
h &= sys.maxsize
h ^= len(x)
if h == -1:
h = -2
return int(h)
return hash(x)
class BoundedFloat(float):
"""
Bounded value is generated by approximate job, with confidence and low
bound and high bound.
Examples
--------
>>> BoundedFloat(100.0, 0.95, 95.0, 105.0)
100.0
"""
def __new__(cls, mean, confidence, low, high):
obj = float.__new__(cls, mean)
obj.confidence = confidence
obj.low = low
obj.high = high
return obj
def _create_local_socket(sock_info):
"""
Create a local socket that can be used to load deserialized data from the JVM
Parameters
----------
sock_info : tuple
Tuple containing port number and authentication secret for a local socket.
Returns
-------
sockfile file descriptor of the local socket
"""
port = sock_info[0]
auth_secret = sock_info[1]
sockfile, sock = local_connect_and_auth(port, auth_secret)
    # RDD materialization time is unpredictable, so setting a timeout on the socket read
    # would very likely make it fail. See SPARK-18281.
sock.settimeout(None)
return sockfile
def _load_from_socket(sock_info, serializer):
"""
Connect to a local socket described by sock_info and use the given serializer to yield data
Parameters
----------
sock_info : tuple
Tuple containing port number and authentication secret for a local socket.
serializer : :py:class:`Serializer`
The PySpark serializer to use
Returns
-------
result of :py:meth:`Serializer.load_stream`,
usually a generator that yields deserialized data
"""
sockfile = _create_local_socket(sock_info)
# The socket will be automatically closed when garbage-collected.
return serializer.load_stream(sockfile)
def _local_iterator_from_socket(sock_info, serializer):
class PyLocalIterable:
"""Create a synchronous local iterable over a socket"""
def __init__(self, _sock_info, _serializer):
port, auth_secret, self.jsocket_auth_server = _sock_info
self._sockfile = _create_local_socket((port, auth_secret))
self._serializer = _serializer
self._read_iter = iter([]) # Initialize as empty iterator
self._read_status = 1
def __iter__(self):
while self._read_status == 1:
# Request next partition data from Java
write_int(1, self._sockfile)
self._sockfile.flush()
# If response is 1 then there is a partition to read, if 0 then fully consumed
self._read_status = read_int(self._sockfile)
if self._read_status == 1:
# Load the partition data as a stream and read each item
self._read_iter = self._serializer.load_stream(self._sockfile)
for item in self._read_iter:
yield item
# An error occurred, join serving thread and raise any exceptions from the JVM
elif self._read_status == -1:
self.jsocket_auth_server.getResult()
def __del__(self):
# If local iterator is not fully consumed,
if self._read_status == 1:
try:
# Finish consuming partition data stream
for _ in self._read_iter:
pass
# Tell Java to stop sending data and close connection
write_int(0, self._sockfile)
self._sockfile.flush()
except Exception:
# Ignore any errors, socket is automatically closed when garbage-collected
pass
return iter(PyLocalIterable(sock_info, serializer))
class Partitioner:
def __init__(self, numPartitions, partitionFunc):
self.numPartitions = numPartitions
self.partitionFunc = partitionFunc
def __eq__(self, other):
return (
isinstance(other, Partitioner)
and self.numPartitions == other.numPartitions
and self.partitionFunc == other.partitionFunc
)
def __call__(self, k):
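        # Map a key to a partition index: apply partitionFunc, then take the result modulo numPartitions.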
return self.partitionFunc(k) % self.numPartitions
class RDD:
"""
A Resilient Distributed Dataset (RDD), the basic abstraction in Spark.
Represents an immutable, partitioned collection of elements that can be
operated on in parallel.
"""
def __init__(self, jrdd, ctx, jrdd_deserializer=AutoBatchedSerializer(CPickleSerializer())):
self._jrdd = jrdd
self.is_cached = False
self.is_checkpointed = False
self.has_resource_profile = False
self.ctx = ctx
self._jrdd_deserializer = jrdd_deserializer
self._id = jrdd.id()
self.partitioner = None
def _pickled(self):
return self._reserialize(AutoBatchedSerializer(CPickleSerializer()))
def id(self):
"""
A unique ID for this RDD (within its SparkContext).
"""
return self._id
def __repr__(self):
return self._jrdd.toString()
def __getnewargs__(self):
# This method is called when attempting to pickle an RDD, which is always an error:
raise RuntimeError(
"It appears that you are attempting to broadcast an RDD or reference an RDD from an "
"action or transformation. RDD transformations and actions can only be invoked by the "
"driver, not inside of other transformations; for example, "
"rdd1.map(lambda x: rdd2.values.count() * x) is invalid because the values "
"transformation and count action cannot be performed inside of the rdd1.map "
"transformation. For more information, see SPARK-5063."
)
@property
def context(self):
"""
The :class:`SparkContext` that this RDD was created on.
"""
return self.ctx
def cache(self):
"""
Persist this RDD with the default storage level (`MEMORY_ONLY`).
"""
self.is_cached = True
self.persist(StorageLevel.MEMORY_ONLY)
return self
def persist(self, storageLevel=StorageLevel.MEMORY_ONLY):
"""
Set this RDD's storage level to persist its values across operations
after the first time it is computed. This can only be used to assign
a new storage level if the RDD does not have a storage level set yet.
If no storage level is specified defaults to (`MEMORY_ONLY`).
Examples
--------
>>> rdd = sc.parallelize(["b", "a", "c"])
>>> rdd.persist().is_cached
True
"""
self.is_cached = True
javaStorageLevel = self.ctx._getJavaStorageLevel(storageLevel)
self._jrdd.persist(javaStorageLevel)
return self
def unpersist(self, blocking=False):
"""
Mark the RDD as non-persistent, and remove all blocks for it from
memory and disk.
.. versionchanged:: 3.0.0
Added optional argument `blocking` to specify whether to block until all
blocks are deleted.
"""
self.is_cached = False
self._jrdd.unpersist(blocking)
return self
def checkpoint(self):
"""
Mark this RDD for checkpointing. It will be saved to a file inside the
checkpoint directory set with :meth:`SparkContext.setCheckpointDir` and
all references to its parent RDDs will be removed. This function must
be called before any job has been executed on this RDD. It is strongly
recommended that this RDD is persisted in memory, otherwise saving it
on a file will require recomputation.
"""
self.is_checkpointed = True
self._jrdd.rdd().checkpoint()
def isCheckpointed(self):
"""
Return whether this RDD is checkpointed and materialized, either reliably or locally.
"""
return self._jrdd.rdd().isCheckpointed()
def localCheckpoint(self):
"""
Mark this RDD for local checkpointing using Spark's existing caching layer.
This method is for users who wish to truncate RDD lineages while skipping the expensive
step of replicating the materialized data in a reliable distributed file system. This is
useful for RDDs with long lineages that need to be truncated periodically (e.g. GraphX).
Local checkpointing sacrifices fault-tolerance for performance. In particular, checkpointed
data is written to ephemeral local storage in the executors instead of to a reliable,
fault-tolerant storage. The effect is that if an executor fails during the computation,
the checkpointed data may no longer be accessible, causing an irrecoverable job failure.
This is NOT safe to use with dynamic allocation, which removes executors along
with their cached blocks. If you must use both features, you are advised to set
`spark.dynamicAllocation.cachedExecutorIdleTimeout` to a high value.
The checkpoint directory set through :meth:`SparkContext.setCheckpointDir` is not used.
"""
self._jrdd.rdd().localCheckpoint()
def isLocallyCheckpointed(self):
"""
Return whether this RDD is marked for local checkpointing.
Exposed for testing.
"""
return self._jrdd.rdd().isLocallyCheckpointed()
def getCheckpointFile(self):
"""
Gets the name of the file to which this RDD was checkpointed
Not defined if RDD is checkpointed locally.
"""
checkpointFile = self._jrdd.rdd().getCheckpointFile()
if checkpointFile.isDefined():
return checkpointFile.get()
def map(self, f, preservesPartitioning=False):
"""
Return a new RDD by applying a function to each element of this RDD.
Examples
--------
>>> rdd = sc.parallelize(["b", "a", "c"])
>>> sorted(rdd.map(lambda x: (x, 1)).collect())
[('a', 1), ('b', 1), ('c', 1)]
"""
def func(_, iterator):
return map(fail_on_stopiteration(f), iterator)
return self.mapPartitionsWithIndex(func, preservesPartitioning)
def flatMap(self, f, preservesPartitioning=False):
"""
Return a new RDD by first applying a function to all elements of this
RDD, and then flattening the results.
Examples
--------
>>> rdd = sc.parallelize([2, 3, 4])
>>> sorted(rdd.flatMap(lambda x: range(1, x)).collect())
[1, 1, 1, 2, 2, 3]
>>> sorted(rdd.flatMap(lambda x: [(x, x), (x, x)]).collect())
[(2, 2), (2, 2), (3, 3), (3, 3), (4, 4), (4, 4)]
"""
def func(s, iterator):
return chain.from_iterable(map(fail_on_stopiteration(f), iterator))
return self.mapPartitionsWithIndex(func, preservesPartitioning)
def mapPartitions(self, f, preservesPartitioning=False):
"""
Return a new RDD by applying a function to each partition of this RDD.
Examples
--------
>>> rdd = sc.parallelize([1, 2, 3, 4], 2)
>>> def f(iterator): yield sum(iterator)
>>> rdd.mapPartitions(f).collect()
[3, 7]
"""
def func(s, iterator):
return f(iterator)
return self.mapPartitionsWithIndex(func, preservesPartitioning)
def mapPartitionsWithIndex(self, f, preservesPartitioning=False):
"""
Return a new RDD by applying a function to each partition of this RDD,
while tracking the index of the original partition.
Examples
--------
>>> rdd = sc.parallelize([1, 2, 3, 4], 4)
>>> def f(splitIndex, iterator): yield splitIndex
>>> rdd.mapPartitionsWithIndex(f).sum()
6
"""
return PipelinedRDD(self, f, preservesPartitioning)
def mapPartitionsWithSplit(self, f, preservesPartitioning=False):
"""
Return a new RDD by applying a function to each partition of this RDD,
while tracking the index of the original partition.
.. deprecated:: 0.9.0
use :py:meth:`RDD.mapPartitionsWithIndex` instead.
Examples
--------
>>> rdd = sc.parallelize([1, 2, 3, 4], 4)
>>> def f(splitIndex, iterator): yield splitIndex
>>> rdd.mapPartitionsWithSplit(f).sum()
6
"""
warnings.warn(
"mapPartitionsWithSplit is deprecated; use mapPartitionsWithIndex instead",
FutureWarning,
stacklevel=2,
)
return self.mapPartitionsWithIndex(f, preservesPartitioning)
def getNumPartitions(self):
"""
Returns the number of partitions in RDD
Examples
--------
>>> rdd = sc.parallelize([1, 2, 3, 4], 2)
>>> rdd.getNumPartitions()
2
"""
return self._jrdd.partitions().size()
def filter(self, f):
"""
Return a new RDD containing only the elements that satisfy a predicate.
Examples
--------
>>> rdd = sc.parallelize([1, 2, 3, 4, 5])
>>> rdd.filter(lambda x: x % 2 == 0).collect()
[2, 4]
"""
def func(iterator):
return filter(fail_on_stopiteration(f), iterator)
return self.mapPartitions(func, True)
def distinct(self, numPartitions=None):
"""
Return a new RDD containing the distinct elements in this RDD.
Examples
--------
>>> sorted(sc.parallelize([1, 1, 2, 3]).distinct().collect())
[1, 2, 3]
"""
return (
self.map(lambda x: (x, None))
.reduceByKey(lambda x, _: x, numPartitions)
.map(lambda x: x[0])
)
def sample(self, withReplacement, fraction, seed=None):
"""
Return a sampled subset of this RDD.
Parameters
----------
withReplacement : bool
can elements be sampled multiple times (replaced when sampled out)
fraction : float
expected size of the sample as a fraction of this RDD's size
without replacement: probability that each element is chosen; fraction must be [0, 1]
with replacement: expected number of times each element is chosen; fraction must be >= 0
seed : int, optional
seed for the random number generator
Notes
-----
This is not guaranteed to provide exactly the fraction specified of the total
        count of the given :class:`RDD`.
Examples
--------
>>> rdd = sc.parallelize(range(100), 4)
>>> 6 <= rdd.sample(False, 0.1, 81).count() <= 14
True
"""
assert fraction >= 0.0, "Negative fraction value: %s" % fraction
return self.mapPartitionsWithIndex(RDDSampler(withReplacement, fraction, seed).func, True)
def randomSplit(self, weights, seed=None):
"""
Randomly splits this RDD with the provided weights.
        Parameters
        ----------
        weights : list
weights for splits, will be normalized if they don't sum to 1
seed : int, optional
random seed
Returns
-------
list
split RDDs in a list
Examples
--------
>>> rdd = sc.parallelize(range(500), 1)
>>> rdd1, rdd2 = rdd.randomSplit([2, 3], 17)
>>> len(rdd1.collect() + rdd2.collect())
500
>>> 150 < rdd1.count() < 250
True
>>> 250 < rdd2.count() < 350
True
"""
s = float(sum(weights))
cweights = [0.0]
for w in weights:
cweights.append(cweights[-1] + w / s)
if seed is None:
seed = random.randint(0, 2 ** 32 - 1)
return [
self.mapPartitionsWithIndex(RDDRangeSampler(lb, ub, seed).func, True)
for lb, ub in zip(cweights, cweights[1:])
]
# this is ported from scala/spark/RDD.scala
def takeSample(self, withReplacement, num, seed=None):
"""
Return a fixed-size sampled subset of this RDD.
Notes
-----
This method should only be used if the resulting array is expected
to be small, as all the data is loaded into the driver's memory.
Examples
--------
>>> rdd = sc.parallelize(range(0, 10))
>>> len(rdd.takeSample(True, 20, 1))
20
>>> len(rdd.takeSample(False, 5, 2))
5
>>> len(rdd.takeSample(False, 15, 3))
10
"""
numStDev = 10.0
if num < 0:
raise ValueError("Sample size cannot be negative.")
elif num == 0:
return []
initialCount = self.count()
if initialCount == 0:
return []
rand = random.Random(seed)
if (not withReplacement) and num >= initialCount:
# shuffle current RDD and return
samples = self.collect()
rand.shuffle(samples)
return samples
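        # Cap num so that the oversampling below (num plus numStDev standard deviations) stays within sys.maxsize.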
maxSampleSize = sys.maxsize - int(numStDev * sqrt(sys.maxsize))
if num > maxSampleSize:
raise ValueError("Sample size cannot be greater than %d." % maxSampleSize)
fraction = RDD._computeFractionForSampleSize(num, initialCount, withReplacement)
samples = self.sample(withReplacement, fraction, seed).collect()
# If the first sample didn't turn out large enough, keep trying to take samples;
# this shouldn't happen often because we use a big multiplier for their initial size.
# See: scala/spark/RDD.scala
while len(samples) < num:
# TODO: add log warning for when more than one iteration was run
seed = rand.randint(0, sys.maxsize)
samples = self.sample(withReplacement, fraction, seed).collect()
rand.shuffle(samples)
return samples[0:num]
@staticmethod
def _computeFractionForSampleSize(sampleSizeLowerBound, total, withReplacement):
"""
Returns a sampling rate that guarantees a sample of
size >= sampleSizeLowerBound 99.99% of the time.
How the sampling rate is determined:
Let p = num / total, where num is the sample size and total is the
total number of data points in the RDD. We're trying to compute
q > p such that
- when sampling with replacement, we're drawing each data point
with prob_i ~ Pois(q), where we want to guarantee
Pr[s < num] < 0.0001 for s = sum(prob_i for i from 0 to
total), i.e. the failure rate of not having a sufficiently large
sample < 0.0001. Setting q = p + 5 * sqrt(p/total) is sufficient
to guarantee 0.9999 success rate for num > 12, but we need a
slightly larger q (9 empirically determined).
- when sampling without replacement, we're drawing each data point
with prob_i ~ Binomial(total, fraction) and our choice of q
guarantees 1-delta, or 0.9999 success rate, where success rate is
defined the same as in sampling with replacement.
"""
fraction = float(sampleSizeLowerBound) / total
if withReplacement:
numStDev = 5
if sampleSizeLowerBound < 12:
numStDev = 9
return fraction + numStDev * sqrt(fraction / total)
else:
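            # Without replacement: inflate the fraction so a Binomial(total, fraction) draw falls short of the requested size with probability at most delta.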
delta = 0.00005
gamma = -log(delta) / total
return min(1, fraction + gamma + sqrt(gamma * gamma + 2 * gamma * fraction))
def union(self, other):
"""
Return the union of this RDD and another one.
Examples
--------
>>> rdd = sc.parallelize([1, 1, 2, 3])
>>> rdd.union(rdd).collect()
[1, 1, 2, 3, 1, 1, 2, 3]
"""
if self._jrdd_deserializer == other._jrdd_deserializer:
rdd = RDD(self._jrdd.union(other._jrdd), self.ctx, self._jrdd_deserializer)
else:
# These RDDs contain data in different serialized formats, so we
# must normalize them to the default serializer.
self_copy = self._reserialize()
other_copy = other._reserialize()
rdd = RDD(self_copy._jrdd.union(other_copy._jrdd), self.ctx, self.ctx.serializer)
if (
self.partitioner == other.partitioner
and self.getNumPartitions() == rdd.getNumPartitions()
):
rdd.partitioner = self.partitioner
return rdd
def intersection(self, other):
"""
Return the intersection of this RDD and another one. The output will
not contain any duplicate elements, even if the input RDDs did.
Notes
-----
This method performs a shuffle internally.
Examples
--------
>>> rdd1 = sc.parallelize([1, 10, 2, 3, 4, 5])
>>> rdd2 = sc.parallelize([1, 6, 2, 3, 7, 8])
>>> rdd1.intersection(rdd2).collect()
[1, 2, 3]
"""
return (
self.map(lambda v: (v, None))
.cogroup(other.map(lambda v: (v, None)))
.filter(lambda k_vs: all(k_vs[1]))
.keys()
)
def _reserialize(self, serializer=None):
serializer = serializer or self.ctx.serializer
if self._jrdd_deserializer != serializer:
self = self.map(lambda x: x, preservesPartitioning=True)
self._jrdd_deserializer = serializer
return self
def __add__(self, other):
"""
Return the union of this RDD and another one.
Examples
--------
>>> rdd = sc.parallelize([1, 1, 2, 3])
>>> (rdd + rdd).collect()
[1, 1, 2, 3, 1, 1, 2, 3]
"""
if not isinstance(other, RDD):
raise TypeError
return self.union(other)
def repartitionAndSortWithinPartitions(
self, numPartitions=None, partitionFunc=portable_hash, ascending=True, keyfunc=lambda x: x
):
"""
Repartition the RDD according to the given partitioner and, within each resulting partition,
sort records by their keys.
Examples
--------
>>> rdd = sc.parallelize([(0, 5), (3, 8), (2, 6), (0, 8), (3, 8), (1, 3)])
>>> rdd2 = rdd.repartitionAndSortWithinPartitions(2, lambda x: x % 2, True)
>>> rdd2.glom().collect()
[[(0, 5), (0, 8), (2, 6)], [(1, 3), (3, 8), (3, 8)]]
"""
if numPartitions is None:
numPartitions = self._defaultReducePartitions()
memory = self._memory_limit()
serializer = self._jrdd_deserializer
def sortPartition(iterator):
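            # Sort each partition with ExternalSorter, giving it ~90% of the worker memory limit before it spills to disk.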
sort = ExternalSorter(memory * 0.9, serializer).sorted
return iter(sort(iterator, key=lambda k_v: keyfunc(k_v[0]), reverse=(not ascending)))
return self.partitionBy(numPartitions, partitionFunc).mapPartitions(sortPartition, True)
def sortByKey(self, ascending=True, numPartitions=None, keyfunc=lambda x: x):
"""
Sorts this RDD, which is assumed to consist of (key, value) pairs.
Examples
--------
>>> tmp = [('a', 1), ('b', 2), ('1', 3), ('d', 4), ('2', 5)]
>>> sc.parallelize(tmp).sortByKey().first()
('1', 3)
>>> sc.parallelize(tmp).sortByKey(True, 1).collect()
[('1', 3), ('2', 5), ('a', 1), ('b', 2), ('d', 4)]
>>> sc.parallelize(tmp).sortByKey(True, 2).collect()
[('1', 3), ('2', 5), ('a', 1), ('b', 2), ('d', 4)]
>>> tmp2 = [('Mary', 1), ('had', 2), ('a', 3), ('little', 4), ('lamb', 5)]
>>> tmp2.extend([('whose', 6), ('fleece', 7), ('was', 8), ('white', 9)])
>>> sc.parallelize(tmp2).sortByKey(True, 3, keyfunc=lambda k: k.lower()).collect()
[('a', 3), ('fleece', 7), ('had', 2), ('lamb', 5),...('white', 9), ('whose', 6)]
"""
if numPartitions is None:
numPartitions = self._defaultReducePartitions()
memory = self._memory_limit()
serializer = self._jrdd_deserializer
def sortPartition(iterator):
sort = ExternalSorter(memory * 0.9, serializer).sorted
return iter(sort(iterator, key=lambda kv: keyfunc(kv[0]), reverse=(not ascending)))
if numPartitions == 1:
if self.getNumPartitions() > 1:
self = self.coalesce(1)
return self.mapPartitions(sortPartition, True)
# first compute the boundary of each part via sampling: we want to partition
# the key-space into bins such that the bins have roughly the same
# number of (key, value) pairs falling into them
rddSize = self.count()
if not rddSize:
return self # empty RDD
maxSampleSize = numPartitions * 20.0 # constant from Spark's RangePartitioner
fraction = min(maxSampleSize / max(rddSize, 1), 1.0)
samples = self.sample(False, fraction, 1).map(lambda kv: kv[0]).collect()
samples = sorted(samples, key=keyfunc)
# we have numPartitions many parts but one of the them has
# an implicit boundary
bounds = [
samples[int(len(samples) * (i + 1) / numPartitions)]
for i in range(0, numPartitions - 1)
]
def rangePartitioner(k):
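            # Binary search over the sampled boundaries picks the ascending bucket; mirror the index when sorting descending.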
p = bisect.bisect_left(bounds, keyfunc(k))
if ascending:
return p
else:
return numPartitions - 1 - p
return self.partitionBy(numPartitions, rangePartitioner).mapPartitions(sortPartition, True)
def sortBy(self, keyfunc, ascending=True, numPartitions=None):
"""
Sorts this RDD by the given keyfunc
Examples
--------
>>> tmp = [('a', 1), ('b', 2), ('1', 3), ('d', 4), ('2', 5)]
>>> sc.parallelize(tmp).sortBy(lambda x: x[0]).collect()
[('1', 3), ('2', 5), ('a', 1), ('b', 2), ('d', 4)]
>>> sc.parallelize(tmp).sortBy(lambda x: x[1]).collect()
[('a', 1), ('b', 2), ('1', 3), ('d', 4), ('2', 5)]
"""
return self.keyBy(keyfunc).sortByKey(ascending, numPartitions).values()
def glom(self):
"""
Return an RDD created by coalescing all elements within each partition
into a list.
Examples
--------
>>> rdd = sc.parallelize([1, 2, 3, 4], 2)
>>> sorted(rdd.glom().collect())
[[1, 2], [3, 4]]
"""
def func(iterator):
yield list(iterator)
return self.mapPartitions(func)
def cartesian(self, other):
"""
Return the Cartesian product of this RDD and another one, that is, the
RDD of all pairs of elements ``(a, b)`` where ``a`` is in `self` and
``b`` is in `other`.
Examples
--------
>>> rdd = sc.parallelize([1, 2])
>>> sorted(rdd.cartesian(rdd).collect())
[(1, 1), (1, 2), (2, 1), (2, 2)]
"""
# Due to batching, we can't use the Java cartesian method.
deserializer = CartesianDeserializer(self._jrdd_deserializer, other._jrdd_deserializer)
return RDD(self._jrdd.cartesian(other._jrdd), self.ctx, deserializer)
def groupBy(self, f, numPartitions=None, partitionFunc=portable_hash):
"""
Return an RDD of grouped items.
Examples
--------
>>> rdd = sc.parallelize([1, 1, 2, 3, 5, 8])
>>> result = rdd.groupBy(lambda x: x % 2).collect()
>>> sorted([(x, sorted(y)) for (x, y) in result])
[(0, [2, 8]), (1, [1, 1, 3, 5])]
"""
return self.map(lambda x: (f(x), x)).groupByKey(numPartitions, partitionFunc)
def pipe(self, command, env=None, checkCode=False):
"""
Return an RDD created by piping elements to a forked external process.
Parameters
----------
command : str
command to run.
env : dict, optional
environment variables to set.
checkCode : bool, optional
whether or not to check the return value of the shell command.
Examples
--------
>>> sc.parallelize(['1', '2', '', '3']).pipe('cat').collect()
['1', '2', '', '3']
"""
if env is None:
env = dict()
def func(iterator):
pipe = Popen(shlex.split(command), env=env, stdin=PIPE, stdout=PIPE)
def pipe_objs(out):
for obj in iterator:
s = str(obj).rstrip("\n") + "\n"
out.write(s.encode("utf-8"))
out.close()
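            # Feed elements to the child's stdin on a background thread so reading its stdout below can't deadlock on a full pipe buffer.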
Thread(target=pipe_objs, args=[pipe.stdin]).start()
def check_return_code():
pipe.wait()
if checkCode and pipe.returncode:
raise RuntimeError(
"Pipe function `%s' exited "
"with error code %d" % (command, pipe.returncode)
)
else:
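                    # Yield nothing: the empty loop just turns check_return_code into a generator so it can be chained after stdout.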
for i in range(0):
yield i
return (
x.rstrip(b"\n").decode("utf-8")
for x in chain(iter(pipe.stdout.readline, b""), check_return_code())
)
return self.mapPartitions(func)
def foreach(self, f):
"""
Applies a function to all elements of this RDD.
Examples
--------
>>> def f(x): print(x)
>>> sc.parallelize([1, 2, 3, 4, 5]).foreach(f)
"""
f = fail_on_stopiteration(f)
def processPartition(iterator):
for x in iterator:
f(x)
return iter([])
self.mapPartitions(processPartition).count() # Force evaluation
def foreachPartition(self, f):
"""
Applies a function to each partition of this RDD.
Examples
--------
>>> def f(iterator):
... for x in iterator:
... print(x)
>>> sc.parallelize([1, 2, 3, 4, 5]).foreachPartition(f)
"""
def func(it):
r = f(it)
try:
return iter(r)
except TypeError:
return iter([])
self.mapPartitions(func).count() # Force evaluation
def collect(self):
"""
Return a list that contains all of the elements in this RDD.
Notes
-----
This method should only be used if the resulting array is expected
to be small, as all the data is loaded into the driver's memory.
"""
with SCCallSiteSync(self.context):
sock_info = self.ctx._jvm.PythonRDD.collectAndServe(self._jrdd.rdd())
return list(_load_from_socket(sock_info, self._jrdd_deserializer))
def collectWithJobGroup(self, groupId, description, interruptOnCancel=False):
"""
When collect rdd, use this method to specify job group.
.. versionadded:: 3.0.0
.. deprecated:: 3.1.0
Use :class:`pyspark.InheritableThread` with the pinned thread mode enabled.
"""
warnings.warn(
"Deprecated in 3.1, Use pyspark.InheritableThread with "
"the pinned thread mode enabled.",
FutureWarning,
)
with SCCallSiteSync(self.context):
sock_info = self.ctx._jvm.PythonRDD.collectAndServeWithJobGroup(
self._jrdd.rdd(), groupId, description, interruptOnCancel
)
return list(_load_from_socket(sock_info, self._jrdd_deserializer))
def reduce(self, f):
"""
Reduces the elements of this RDD using the specified commutative and
associative binary operator. Currently reduces partitions locally.
Examples
--------
>>> from operator import add
>>> sc.parallelize([1, 2, 3, 4, 5]).reduce(add)
15
>>> sc.parallelize((2 for _ in range(10))).map(lambda x: 1).cache().reduce(add)
10
>>> sc.parallelize([]).reduce(add)
Traceback (most recent call last):
...
ValueError: Can not reduce() empty RDD
"""
f = fail_on_stopiteration(f)
def func(iterator):
iterator = iter(iterator)
try:
initial = next(iterator)
except StopIteration:
return
yield reduce(f, iterator, initial)
vals = self.mapPartitions(func).collect()
if vals:
return reduce(f, vals)
raise ValueError("Can not reduce() empty RDD")
def treeReduce(self, f, depth=2):
"""
Reduces the elements of this RDD in a multi-level tree pattern.
Parameters
----------
f : function
depth : int, optional
suggested depth of the tree (default: 2)
Examples
--------
>>> add = lambda x, y: x + y
>>> rdd = sc.parallelize([-5, -4, -3, -2, -1, 1, 2, 3, 4], 10)
>>> rdd.treeReduce(add)
-5
>>> rdd.treeReduce(add, 1)
-5
>>> rdd.treeReduce(add, 2)
-5
>>> rdd.treeReduce(add, 5)
-5
>>> rdd.treeReduce(add, 10)
-5
"""
if depth < 1:
raise ValueError("Depth cannot be smaller than 1 but got %d." % depth)
zeroValue = None, True # Use the second entry to indicate whether this is a dummy value.
def op(x, y):
if x[1]:
return y
elif y[1]:
return x
else:
return f(x[0], y[0]), False
reduced = self.map(lambda x: (x, False)).treeAggregate(zeroValue, op, op, depth)
if reduced[1]:
raise ValueError("Cannot reduce empty RDD.")
return reduced[0]
def fold(self, zeroValue, op):
"""
Aggregate the elements of each partition, and then the results for all
the partitions, using a given associative function and a neutral "zero value."
The function ``op(t1, t2)`` is allowed to modify ``t1`` and return it
as its result value to avoid object allocation; however, it should not
modify ``t2``.
This behaves somewhat differently from fold operations implemented
for non-distributed collections in functional languages like Scala.
This fold operation may be applied to partitions individually, and then
fold those results into the final result, rather than apply the fold
to each element sequentially in some defined ordering. For functions
that are not commutative, the result may differ from that of a fold
applied to a non-distributed collection.
Examples
--------
>>> from operator import add
>>> sc.parallelize([1, 2, 3, 4, 5]).fold(0, add)
15
"""
op = fail_on_stopiteration(op)
def func(iterator):
acc = zeroValue
for obj in iterator:
acc = op(acc, obj)
yield acc
# collecting result of mapPartitions here ensures that the copy of
# zeroValue provided to each partition is unique from the one provided
# to the final reduce call
vals = self.mapPartitions(func).collect()
return reduce(op, vals, zeroValue)
def aggregate(self, zeroValue, seqOp, combOp):
"""
Aggregate the elements of each partition, and then the results for all
the partitions, using a given combine functions and a neutral "zero
value."
        The function ``op(t1, t2)`` is allowed to modify ``t1`` and return it
as its result value to avoid object allocation; however, it should not
modify ``t2``.
The first function (seqOp) can return a different result type, U, than
the type of this RDD. Thus, we need one operation for merging a T into
        a U and one operation for merging two U's.
Examples
--------
>>> seqOp = (lambda x, y: (x[0] + y, x[1] + 1))
>>> combOp = (lambda x, y: (x[0] + y[0], x[1] + y[1]))
>>> sc.parallelize([1, 2, 3, 4]).aggregate((0, 0), seqOp, combOp)
(10, 4)
>>> sc.parallelize([]).aggregate((0, 0), seqOp, combOp)
(0, 0)
"""
seqOp = fail_on_stopiteration(seqOp)
combOp = fail_on_stopiteration(combOp)
def func(iterator):
acc = zeroValue
for obj in iterator:
acc = seqOp(acc, obj)
yield acc
# collecting result of mapPartitions here ensures that the copy of
# zeroValue provided to each partition is unique from the one provided
# to the final reduce call
vals = self.mapPartitions(func).collect()
return reduce(combOp, vals, zeroValue)
def treeAggregate(self, zeroValue, seqOp, combOp, depth=2):
"""
Aggregates the elements of this RDD in a multi-level tree
pattern.
        Parameters
        ----------
        depth : int, optional
suggested depth of the tree (default: 2)
Examples
--------
>>> add = lambda x, y: x + y
>>> rdd = sc.parallelize([-5, -4, -3, -2, -1, 1, 2, 3, 4], 10)
>>> rdd.treeAggregate(0, add, add)
-5
>>> rdd.treeAggregate(0, add, add, 1)
-5
>>> rdd.treeAggregate(0, add, add, 2)
-5
>>> rdd.treeAggregate(0, add, add, 5)
-5
>>> rdd.treeAggregate(0, add, add, 10)
-5
"""
if depth < 1:
raise ValueError("Depth cannot be smaller than 1 but got %d." % depth)
if self.getNumPartitions() == 0:
return zeroValue
def aggregatePartition(iterator):
acc = zeroValue
for obj in iterator:
acc = seqOp(acc, obj)
yield acc
partiallyAggregated = self.mapPartitions(aggregatePartition)
numPartitions = partiallyAggregated.getNumPartitions()
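        # Branching factor for the tree: the depth-th root of the partition count, but never less than 2.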
scale = max(int(ceil(pow(numPartitions, 1.0 / depth))), 2)
# If creating an extra level doesn't help reduce the wall-clock time, we stop the tree
# aggregation.
while numPartitions > scale + numPartitions / scale:
numPartitions /= scale
curNumPartitions = int(numPartitions)
def mapPartition(i, iterator):
for obj in iterator:
yield (i % curNumPartitions, obj)
partiallyAggregated = (
partiallyAggregated.mapPartitionsWithIndex(mapPartition)
.reduceByKey(combOp, curNumPartitions)
.values()
)
return partiallyAggregated.reduce(combOp)
def max(self, key=None):
"""
Find the maximum item in this RDD.
Parameters
----------
key : function, optional
A function used to generate key for comparing
Examples
--------
>>> rdd = sc.parallelize([1.0, 5.0, 43.0, 10.0])
>>> rdd.max()
43.0
>>> rdd.max(key=str)
5.0
"""
if key is None:
return self.reduce(max)
return self.reduce(lambda a, b: max(a, b, key=key))
def min(self, key=None):
"""
Find the minimum item in this RDD.
Parameters
----------
key : function, optional
A function used to generate key for comparing
Examples
--------
>>> rdd = sc.parallelize([2.0, 5.0, 43.0, 10.0])
>>> rdd.min()
2.0
>>> rdd.min(key=str)
10.0
"""
if key is None:
return self.reduce(min)
return self.reduce(lambda a, b: min(a, b, key=key))
def sum(self):
"""
Add up the elements in this RDD.
Examples
--------
>>> sc.parallelize([1.0, 2.0, 3.0]).sum()
6.0
"""
return self.mapPartitions(lambda x: [sum(x)]).fold(0, operator.add)
def count(self):
"""
Return the number of elements in this RDD.
Examples
--------
>>> sc.parallelize([2, 3, 4]).count()
3
"""
return self.mapPartitions(lambda i: [sum(1 for _ in i)]).sum()
def stats(self):
"""
Return a :class:`StatCounter` object that captures the mean, variance
and count of the RDD's elements in one operation.
"""
def redFunc(left_counter, right_counter):
return left_counter.mergeStats(right_counter)
return self.mapPartitions(lambda i: [StatCounter(i)]).reduce(redFunc)
def histogram(self, buckets):
"""
Compute a histogram using the provided buckets. The buckets
are all open to the right except for the last which is closed.
e.g. [1,10,20,50] means the buckets are [1,10) [10,20) [20,50],
        which means 1<=x<10, 10<=x<20, 20<=x<=50. Given inputs of 1
        and 50, the histogram would be 1,0,1.
If your histogram is evenly spaced (e.g. [0, 10, 20, 30]),
this can be switched from an O(log n) insertion to O(1) per
element (where n is the number of buckets).
Buckets must be sorted, not contain any duplicates, and have
at least two elements.
If `buckets` is a number, it will generate buckets which are
evenly spaced between the minimum and maximum of the RDD. For
example, if the min value is 0 and the max is 100, given `buckets`
as 2, the resulting buckets will be [0,50) [50,100]. `buckets` must
be at least 1. An exception is raised if the RDD contains infinity.
If the elements in the RDD do not vary (max == min), a single bucket
will be used.
The return value is a tuple of buckets and histogram.
Examples
--------
>>> rdd = sc.parallelize(range(51))
>>> rdd.histogram(2)
([0, 25, 50], [25, 26])
>>> rdd.histogram([0, 5, 25, 50])
([0, 5, 25, 50], [5, 20, 26])
>>> rdd.histogram([0, 15, 30, 45, 60]) # evenly spaced buckets
([0, 15, 30, 45, 60], [15, 15, 15, 6])
>>> rdd = sc.parallelize(["ab", "ac", "b", "bd", "ef"])
>>> rdd.histogram(("a", "b", "c"))
(('a', 'b', 'c'), [2, 2])
"""
if isinstance(buckets, int):
if buckets < 1:
raise ValueError("number of buckets must be >= 1")
# filter out non-comparable elements
def comparable(x):
if x is None:
return False
if type(x) is float and isnan(x):
return False
return True
filtered = self.filter(comparable)
# faster than stats()
def minmax(a, b):
return min(a[0], b[0]), max(a[1], b[1])
try:
minv, maxv = filtered.map(lambda x: (x, x)).reduce(minmax)
except TypeError as e:
if " empty " in str(e):
raise ValueError("can not generate buckets from empty RDD")
raise
if minv == maxv or buckets == 1:
return [minv, maxv], [filtered.count()]
try:
inc = (maxv - minv) / buckets
except TypeError:
raise TypeError("Can not generate buckets with non-number in RDD")
if isinf(inc):
raise ValueError("Can not generate buckets with infinite value")
# keep them as integer if possible
inc = int(inc)
if inc * buckets != maxv - minv:
inc = (maxv - minv) * 1.0 / buckets
buckets = [i * inc + minv for i in range(buckets)]
buckets.append(maxv) # fix accumulated error
even = True
elif isinstance(buckets, (list, tuple)):
if len(buckets) < 2:
raise ValueError("buckets should have more than one value")
if any(i is None or isinstance(i, float) and isnan(i) for i in buckets):
raise ValueError("can not have None or NaN in buckets")
if sorted(buckets) != list(buckets):
raise ValueError("buckets should be sorted")
if len(set(buckets)) != len(buckets):
raise ValueError("buckets should not contain duplicated values")
minv = buckets[0]
maxv = buckets[-1]
even = False
inc = None
try:
steps = [buckets[i + 1] - buckets[i] for i in range(len(buckets) - 1)]
except TypeError:
pass # objects in buckets do not support '-'
else:
if max(steps) - min(steps) < 1e-10: # handle precision errors
even = True
inc = (maxv - minv) / (len(buckets) - 1)
else:
raise TypeError("buckets should be a list or tuple or number(int or long)")
def histogram(iterator):
counters = [0] * len(buckets)
for i in iterator:
if i is None or (type(i) is float and isnan(i)) or i > maxv or i < minv:
continue
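                # Evenly spaced buckets allow O(1) index arithmetic; otherwise fall back to an O(log n) binary search.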
t = int((i - minv) / inc) if even else bisect.bisect_right(buckets, i) - 1
counters[t] += 1
# add last two together
last = counters.pop()
counters[-1] += last
return [counters]
def mergeCounters(a, b):
return [i + j for i, j in zip(a, b)]
return buckets, self.mapPartitions(histogram).reduce(mergeCounters)
def mean(self):
"""
Compute the mean of this RDD's elements.
Examples
--------
>>> sc.parallelize([1, 2, 3]).mean()
2.0
"""
return self.stats().mean()
def variance(self):
"""
Compute the variance of this RDD's elements.
Examples
--------
>>> sc.parallelize([1, 2, 3]).variance()
0.666...
"""
return self.stats().variance()
def stdev(self):
"""
Compute the standard deviation of this RDD's elements.
Examples
--------
>>> sc.parallelize([1, 2, 3]).stdev()
0.816...
"""
return self.stats().stdev()
def sampleStdev(self):
"""
Compute the sample standard deviation of this RDD's elements (which
corrects for bias in estimating the standard deviation by dividing by
N-1 instead of N).
Examples
--------
>>> sc.parallelize([1, 2, 3]).sampleStdev()
1.0
"""
return self.stats().sampleStdev()
def sampleVariance(self):
"""
Compute the sample variance of this RDD's elements (which corrects
for bias in estimating the variance by dividing by N-1 instead of N).
Examples
--------
>>> sc.parallelize([1, 2, 3]).sampleVariance()
1.0
"""
return self.stats().sampleVariance()
def countByValue(self):
"""
Return the count of each unique value in this RDD as a dictionary of
(value, count) pairs.
Examples
--------
>>> sorted(sc.parallelize([1, 2, 1, 2, 2], 2).countByValue().items())
[(1, 2), (2, 3)]
"""
def countPartition(iterator):
counts = defaultdict(int)
for obj in iterator:
counts[obj] += 1
yield counts
def mergeMaps(m1, m2):
for k, v in m2.items():
m1[k] += v
return m1
return self.mapPartitions(countPartition).reduce(mergeMaps)
def top(self, num, key=None):
"""
Get the top N elements from an RDD.
Notes
-----
This method should only be used if the resulting array is expected
to be small, as all the data is loaded into the driver's memory.
It returns the list sorted in descending order.
Examples
--------
>>> sc.parallelize([10, 4, 2, 12, 3]).top(1)
[12]
>>> sc.parallelize([2, 3, 4, 5, 6], 2).top(2)
[6, 5]
>>> sc.parallelize([10, 4, 2, 12, 3]).top(3, key=str)
[4, 3, 2]
"""
def topIterator(iterator):
yield heapq.nlargest(num, iterator, key=key)
def merge(a, b):
return heapq.nlargest(num, a + b, key=key)
return self.mapPartitions(topIterator).reduce(merge)
def takeOrdered(self, num, key=None):
"""
Get the N elements from an RDD ordered in ascending order or as
specified by the optional key function.
Notes
-----
This method should only be used if the resulting array is expected
to be small, as all the data is loaded into the driver's memory.
Examples
--------
>>> sc.parallelize([10, 1, 2, 9, 3, 4, 5, 6, 7]).takeOrdered(6)
[1, 2, 3, 4, 5, 6]
>>> sc.parallelize([10, 1, 2, 9, 3, 4, 5, 6, 7], 2).takeOrdered(6, key=lambda x: -x)
[10, 9, 7, 6, 5, 4]
"""
def merge(a, b):
return heapq.nsmallest(num, a + b, key)
return self.mapPartitions(lambda it: [heapq.nsmallest(num, it, key)]).reduce(merge)
def take(self, num):
"""
Take the first num elements of the RDD.
It works by first scanning one partition, and using the results from
that partition to estimate the number of additional partitions needed
to satisfy the limit.
Translated from the Scala implementation in RDD#take().
Notes
-----
This method should only be used if the resulting array is expected
to be small, as all the data is loaded into the driver's memory.
Examples
--------
>>> sc.parallelize([2, 3, 4, 5, 6]).cache().take(2)
[2, 3]
>>> sc.parallelize([2, 3, 4, 5, 6]).take(10)
[2, 3, 4, 5, 6]
>>> sc.parallelize(range(100), 100).filter(lambda x: x > 90).take(3)
[91, 92, 93]
"""
items = []
totalParts = self.getNumPartitions()
partsScanned = 0
while len(items) < num and partsScanned < totalParts:
# The number of partitions to try in this iteration.
# It is ok for this number to be greater than totalParts because
# we actually cap it at totalParts in runJob.
numPartsToTry = 1
if partsScanned > 0:
# If we didn't find any rows after the previous iteration,
# quadruple and retry. Otherwise, interpolate the number of
# partitions we need to try, but overestimate it by 50%.
# We also cap the estimation in the end.
if len(items) == 0:
numPartsToTry = partsScanned * 4
else:
# the first parameter of max is >=1 whenever partsScanned >= 2
numPartsToTry = int(1.5 * num * partsScanned / len(items)) - partsScanned
numPartsToTry = min(max(numPartsToTry, 1), partsScanned * 4)
left = num - len(items)
def takeUpToNumLeft(iterator):
iterator = iter(iterator)
taken = 0
while taken < left:
try:
yield next(iterator)
except StopIteration:
return
taken += 1
p = range(partsScanned, min(partsScanned + numPartsToTry, totalParts))
res = self.context.runJob(self, takeUpToNumLeft, p)
items += res
partsScanned += numPartsToTry
return items[:num]
def first(self):
"""
Return the first element in this RDD.
Examples
--------
>>> sc.parallelize([2, 3, 4]).first()
2
>>> sc.parallelize([]).first()
Traceback (most recent call last):
...
ValueError: RDD is empty
"""
rs = self.take(1)
if rs:
return rs[0]
raise ValueError("RDD is empty")
def isEmpty(self):
"""
Returns true if and only if the RDD contains no elements at all.
Notes
-----
An RDD may be empty even when it has at least 1 partition.
Examples
--------
>>> sc.parallelize([]).isEmpty()
True
>>> sc.parallelize([1]).isEmpty()
False
"""
return self.getNumPartitions() == 0 or len(self.take(1)) == 0
def saveAsNewAPIHadoopDataset(self, conf, keyConverter=None, valueConverter=None):
"""
Output a Python RDD of key-value pairs (of form ``RDD[(K, V)]``) to any Hadoop file
system, using the new Hadoop OutputFormat API (mapreduce package). Keys/values are
converted for output using either user specified converters or, by default,
"org.apache.spark.api.python.JavaToWritableConverter".
Parameters
----------
conf : dict
Hadoop job configuration
keyConverter : str, optional
fully qualified classname of key converter (None by default)
valueConverter : str, optional
fully qualified classname of value converter (None by default)
"""
jconf = self.ctx._dictToJavaMap(conf)
pickledRDD = self._pickled()
self.ctx._jvm.PythonRDD.saveAsHadoopDataset(
pickledRDD._jrdd, True, jconf, keyConverter, valueConverter, True
)
def saveAsNewAPIHadoopFile(
self,
path,
outputFormatClass,
keyClass=None,
valueClass=None,
keyConverter=None,
valueConverter=None,
conf=None,
):
"""
Output a Python RDD of key-value pairs (of form ``RDD[(K, V)]``) to any Hadoop file
system, using the new Hadoop OutputFormat API (mapreduce package). Key and value types
will be inferred if not specified. Keys and values are converted for output using either
user specified converters or "org.apache.spark.api.python.JavaToWritableConverter". The
`conf` is applied on top of the base Hadoop conf associated with the SparkContext
of this RDD to create a merged Hadoop MapReduce job configuration for saving the data.
Parameters
----------
path : str
path to Hadoop file
outputFormatClass : str
fully qualified classname of Hadoop OutputFormat
(e.g. "org.apache.hadoop.mapreduce.lib.output.SequenceFileOutputFormat")
keyClass : str, optional
fully qualified classname of key Writable class
(e.g. "org.apache.hadoop.io.IntWritable", None by default)
valueClass : str, optional
fully qualified classname of value Writable class
(e.g. "org.apache.hadoop.io.Text", None by default)
keyConverter : str, optional
fully qualified classname of key converter (None by default)
valueConverter : str, optional
fully qualified classname of value converter (None by default)
conf : dict, optional
Hadoop job configuration (None by default)
"""
jconf = self.ctx._dictToJavaMap(conf)
pickledRDD = self._pickled()
self.ctx._jvm.PythonRDD.saveAsNewAPIHadoopFile(
pickledRDD._jrdd,
True,
path,
outputFormatClass,
keyClass,
valueClass,
keyConverter,
valueConverter,
jconf,
)
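# Illustrative usage sketch (added for clarity, not part of the original file;
# the path is an example and `sc` is assumed to be a live SparkContext):
#   rdd = sc.parallelize([("key1", 1), ("key2", 2)])
#   rdd.saveAsNewAPIHadoopFile(
#       "/tmp/new_api_out",
#       "org.apache.hadoop.mapreduce.lib.output.TextOutputFormat",
#       keyClass="org.apache.hadoop.io.Text",
#       valueClass="org.apache.hadoop.io.IntWritable",
#   )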
def saveAsHadoopDataset(self, conf, keyConverter=None, valueConverter=None):
"""
Output a Python RDD of key-value pairs (of form ``RDD[(K, V)]``) to any Hadoop file
system, using the old Hadoop OutputFormat API (mapred package). Keys/values are
converted for output using either user specified converters or, by default,
"org.apache.spark.api.python.JavaToWritableConverter".
Parameters
----------
conf : dict
Hadoop job configuration
keyConverter : str, optional
fully qualified classname of key converter (None by default)
valueConverter : str, optional
fully qualified classname of value converter (None by default)
"""
jconf = self.ctx._dictToJavaMap(conf)
pickledRDD = self._pickled()
self.ctx._jvm.PythonRDD.saveAsHadoopDataset(
pickledRDD._jrdd, True, jconf, keyConverter, valueConverter, False
)
def saveAsHadoopFile(
self,
path,
outputFormatClass,
keyClass=None,
valueClass=None,
keyConverter=None,
valueConverter=None,
conf=None,
compressionCodecClass=None,
):
"""
Output a Python RDD of key-value pairs (of form ``RDD[(K, V)]``) to any Hadoop file
system, using the old Hadoop OutputFormat API (mapred package). Key and value types
will be inferred if not specified. Keys and values are converted for output using either
user specified converters or "org.apache.spark.api.python.JavaToWritableConverter". The
`conf` is applied on top of the base Hadoop conf associated with the SparkContext
of this RDD to create a merged Hadoop MapReduce job configuration for saving the data.
Parameters
----------
path : str
path to Hadoop file
outputFormatClass : str
fully qualified classname of Hadoop OutputFormat
(e.g. "org.apache.hadoop.mapred.SequenceFileOutputFormat")
keyClass : str, optional
fully qualified classname of key Writable class
(e.g. "org.apache.hadoop.io.IntWritable", None by default)
valueClass : str, optional
fully qualified classname of value Writable class
(e.g. "org.apache.hadoop.io.Text", None by default)
keyConverter : str, optional
fully qualified classname of key converter (None by default)
valueConverter : str, optional
fully qualified classname of value converter (None by default)
conf : dict, optional
(None by default)
compressionCodecClass : str
fully qualified classname of the compression codec class
i.e. "org.apache.hadoop.io.compress.GzipCodec" (None by default)
"""
jconf = self.ctx._dictToJavaMap(conf)
pickledRDD = self._pickled()
self.ctx._jvm.PythonRDD.saveAsHadoopFile(
pickledRDD._jrdd,
True,
path,
outputFormatClass,
keyClass,
valueClass,
keyConverter,
valueConverter,
jconf,
compressionCodecClass,
)
def saveAsSequenceFile(self, path, compressionCodecClass=None):
"""
Output a Python RDD of key-value pairs (of form ``RDD[(K, V)]``) to any Hadoop file
system, using the "org.apache.hadoop.io.Writable" types that we convert from the
RDD's key and value types. The mechanism is as follows:
1. Pickle is used to convert pickled Python RDD into RDD of Java objects.
2. Keys and values of this Java RDD are converted to Writables and written out.
Parameters
----------
path : str
path to sequence file
compressionCodecClass : str, optional
fully qualified classname of the compression codec class
i.e. "org.apache.hadoop.io.compress.GzipCodec" (None by default)
"""
pickledRDD = self._pickled()
self.ctx._jvm.PythonRDD.saveAsSequenceFile(
pickledRDD._jrdd, True, path, compressionCodecClass
)
def saveAsPickleFile(self, path, batchSize=10):
"""
Save this RDD as a SequenceFile of serialized objects. The serializer
used is :class:`pyspark.serializers.CPickleSerializer`, default batch size
is 10.
Examples
--------
>>> from tempfile import NamedTemporaryFile
>>> tmpFile = NamedTemporaryFile(delete=True)
>>> tmpFile.close()
>>> sc.parallelize([1, 2, 'spark', 'rdd']).saveAsPickleFile(tmpFile.name, 3)
>>> sorted(sc.pickleFile(tmpFile.name, 5).map(str).collect())
['1', '2', 'rdd', 'spark']
"""
if batchSize == 0:
ser = AutoBatchedSerializer(CPickleSerializer())
else:
ser = BatchedSerializer(CPickleSerializer(), batchSize)
self._reserialize(ser)._jrdd.saveAsObjectFile(path)
def saveAsTextFile(self, path, compressionCodecClass=None):
"""
Save this RDD as a text file, using string representations of elements.
Parameters
----------
path : str
path to text file
compressionCodecClass : str, optional
fully qualified classname of the compression codec class
i.e. "org.apache.hadoop.io.compress.GzipCodec" (None by default)
Examples
--------
>>> from tempfile import NamedTemporaryFile
>>> tempFile = NamedTemporaryFile(delete=True)
>>> tempFile.close()
>>> sc.parallelize(range(10)).saveAsTextFile(tempFile.name)
>>> from fileinput import input
>>> from glob import glob
>>> ''.join(sorted(input(glob(tempFile.name + "/part-0000*"))))
'0\\n1\\n2\\n3\\n4\\n5\\n6\\n7\\n8\\n9\\n'
Empty lines are tolerated when saving to text files.
>>> from tempfile import NamedTemporaryFile
>>> tempFile2 = NamedTemporaryFile(delete=True)
>>> tempFile2.close()
>>> sc.parallelize(['', 'foo', '', 'bar', '']).saveAsTextFile(tempFile2.name)
>>> ''.join(sorted(input(glob(tempFile2.name + "/part-0000*"))))
'\\n\\n\\nbar\\nfoo\\n'
Using compressionCodecClass
>>> from tempfile import NamedTemporaryFile
>>> tempFile3 = NamedTemporaryFile(delete=True)
>>> tempFile3.close()
>>> codec = "org.apache.hadoop.io.compress.GzipCodec"
>>> sc.parallelize(['foo', 'bar']).saveAsTextFile(tempFile3.name, codec)
>>> from fileinput import input, hook_compressed
>>> result = sorted(input(glob(tempFile3.name + "/part*.gz"), openhook=hook_compressed))
>>> ''.join([r.decode('utf-8') if isinstance(r, bytes) else r for r in result])
'bar\\nfoo\\n'
"""
def func(split, iterator):
for x in iterator:
if not isinstance(x, (str, bytes)):
x = str(x)
if isinstance(x, str):
x = x.encode("utf-8")
yield x
keyed = self.mapPartitionsWithIndex(func)
keyed._bypass_serializer = True
if compressionCodecClass:
compressionCodec = self.ctx._jvm.java.lang.Class.forName(compressionCodecClass)
keyed._jrdd.map(self.ctx._jvm.BytesToString()).saveAsTextFile(path, compressionCodec)
else:
keyed._jrdd.map(self.ctx._jvm.BytesToString()).saveAsTextFile(path)
# Pair functions
def collectAsMap(self):
"""
Return the key-value pairs in this RDD to the master as a dictionary.
Notes
-----
This method should only be used if the resulting data is expected
to be small, as all the data is loaded into the driver's memory.
Examples
--------
>>> m = sc.parallelize([(1, 2), (3, 4)]).collectAsMap()
>>> m[1]
2
>>> m[3]
4
"""
return dict(self.collect())
def keys(self):
"""
Return an RDD with the keys of each tuple.
Examples
--------
>>> m = sc.parallelize([(1, 2), (3, 4)]).keys()
>>> m.collect()
[1, 3]
"""
return self.map(lambda x: x[0])
def values(self):
"""
Return an RDD with the values of each tuple.
Examples
--------
>>> m = sc.parallelize([(1, 2), (3, 4)]).values()
>>> m.collect()
[2, 4]
"""
return self.map(lambda x: x[1])
def reduceByKey(self, func, numPartitions=None, partitionFunc=portable_hash):
"""
Merge the values for each key using an associative and commutative reduce function.
This will also perform the merging locally on each mapper before
sending results to a reducer, similarly to a "combiner" in MapReduce.
Output will be partitioned with `numPartitions` partitions, or
the default parallelism level if `numPartitions` is not specified.
Default partitioner is hash-partition.
Examples
--------
>>> from operator import add
>>> rdd = sc.parallelize([("a", 1), ("b", 1), ("a", 1)])
>>> sorted(rdd.reduceByKey(add).collect())
[('a', 2), ('b', 1)]
"""
return self.combineByKey(lambda x: x, func, func, numPartitions, partitionFunc)
def reduceByKeyLocally(self, func):
"""
Merge the values for each key using an associative and commutative reduce function, but
return the results immediately to the master as a dictionary.
This will also perform the merging locally on each mapper before
sending results to a reducer, similarly to a "combiner" in MapReduce.
Examples
--------
>>> from operator import add
>>> rdd = sc.parallelize([("a", 1), ("b", 1), ("a", 1)])
>>> sorted(rdd.reduceByKeyLocally(add).items())
[('a', 2), ('b', 1)]
"""
func = fail_on_stopiteration(func)
def reducePartition(iterator):
m = {}
for k, v in iterator:
m[k] = func(m[k], v) if k in m else v
yield m
def mergeMaps(m1, m2):
for k, v in m2.items():
m1[k] = func(m1[k], v) if k in m1 else v
return m1
return self.mapPartitions(reducePartition).reduce(mergeMaps)
def countByKey(self):
"""
Count the number of elements for each key, and return the result to the
master as a dictionary.
Examples
--------
>>> rdd = sc.parallelize([("a", 1), ("b", 1), ("a", 1)])
>>> sorted(rdd.countByKey().items())
[('a', 2), ('b', 1)]
"""
return self.map(lambda x: x[0]).countByValue()
def join(self, other, numPartitions=None):
"""
Return an RDD containing all pairs of elements with matching keys in
`self` and `other`.
Each pair of elements will be returned as a (k, (v1, v2)) tuple, where
(k, v1) is in `self` and (k, v2) is in `other`.
Performs a hash join across the cluster.
Examples
--------
>>> x = sc.parallelize([("a", 1), ("b", 4)])
>>> y = sc.parallelize([("a", 2), ("a", 3)])
>>> sorted(x.join(y).collect())
[('a', (1, 2)), ('a', (1, 3))]
"""
return python_join(self, other, numPartitions)
def leftOuterJoin(self, other, numPartitions=None):
"""
Perform a left outer join of `self` and `other`.
For each element (k, v) in `self`, the resulting RDD will either
contain all pairs (k, (v, w)) for w in `other`, or the pair
(k, (v, None)) if no elements in `other` have key k.
Hash-partitions the resulting RDD into the given number of partitions.
Examples
--------
>>> x = sc.parallelize([("a", 1), ("b", 4)])
>>> y = sc.parallelize([("a", 2)])
>>> sorted(x.leftOuterJoin(y).collect())
[('a', (1, 2)), ('b', (4, None))]
"""
return python_left_outer_join(self, other, numPartitions)
def rightOuterJoin(self, other, numPartitions=None):
"""
Perform a right outer join of `self` and `other`.
For each element (k, w) in `other`, the resulting RDD will either
contain all pairs (k, (v, w)) for v in this, or the pair (k, (None, w))
if no elements in `self` have key k.
Hash-partitions the resulting RDD into the given number of partitions.
Examples
--------
>>> x = sc.parallelize([("a", 1), ("b", 4)])
>>> y = sc.parallelize([("a", 2)])
>>> sorted(y.rightOuterJoin(x).collect())
[('a', (2, 1)), ('b', (None, 4))]
"""
return python_right_outer_join(self, other, numPartitions)
def fullOuterJoin(self, other, numPartitions=None):
"""
Perform a full outer join of `self` and `other`.
For each element (k, v) in `self`, the resulting RDD will either
contain all pairs (k, (v, w)) for w in `other`, or the pair
(k, (v, None)) if no elements in `other` have key k.
Similarly, for each element (k, w) in `other`, the resulting RDD will
either contain all pairs (k, (v, w)) for v in `self`, or the pair
(k, (None, w)) if no elements in `self` have key k.
Hash-partitions the resulting RDD into the given number of partitions.
Examples
--------
>>> x = sc.parallelize([("a", 1), ("b", 4)])
>>> y = sc.parallelize([("a", 2), ("c", 8)])
>>> sorted(x.fullOuterJoin(y).collect())
[('a', (1, 2)), ('b', (4, None)), ('c', (None, 8))]
"""
return python_full_outer_join(self, other, numPartitions)
# TODO: add option to control map-side combining
# portable_hash is used as default, because builtin hash of None is different
# cross machines.
def partitionBy(self, numPartitions, partitionFunc=portable_hash):
"""
Return a copy of the RDD partitioned using the specified partitioner.
Examples
--------
>>> pairs = sc.parallelize([1, 2, 3, 4, 2, 4, 1]).map(lambda x: (x, x))
>>> sets = pairs.partitionBy(2).glom().collect()
>>> len(set(sets[0]).intersection(set(sets[1])))
0
"""
if numPartitions is None:
numPartitions = self._defaultReducePartitions()
partitioner = Partitioner(numPartitions, partitionFunc)
if self.partitioner == partitioner:
return self
# Transferring O(n) objects to Java is too expensive.
# Instead, we'll form the hash buckets in Python,
# transferring O(numPartitions) objects to Java.
# Each object is a (splitNumber, [objects]) pair.
# In order to avoid too huge objects, the objects are
# grouped into chunks.
outputSerializer = self.ctx._unbatched_serializer
limit = self._memory_limit() / 2
def add_shuffle_key(split, iterator):
buckets = defaultdict(list)
c, batch = 0, min(10 * numPartitions, 1000)
for k, v in iterator:
buckets[partitionFunc(k) % numPartitions].append((k, v))
c += 1
# check used memory and avg size of chunk of objects
if c % 1000 == 0 and get_used_memory() > limit or c > batch:
n, size = len(buckets), 0
for split in list(buckets.keys()):
yield pack_long(split)
d = outputSerializer.dumps(buckets[split])
del buckets[split]
yield d
size += len(d)
avg = int(size / n) >> 20
# let 1M < avg < 10M
if avg < 1:
batch = min(sys.maxsize, batch * 1.5)
elif avg > 10:
batch = max(int(batch / 1.5), 1)
c = 0
for split, items in buckets.items():
yield pack_long(split)
yield outputSerializer.dumps(items)
keyed = self.mapPartitionsWithIndex(add_shuffle_key, preservesPartitioning=True)
keyed._bypass_serializer = True
with SCCallSiteSync(self.context):
pairRDD = self.ctx._jvm.PairwiseRDD(keyed._jrdd.rdd()).asJavaPairRDD()
jpartitioner = self.ctx._jvm.PythonPartitioner(numPartitions, id(partitionFunc))
jrdd = self.ctx._jvm.PythonRDD.valueOfPair(pairRDD.partitionBy(jpartitioner))
rdd = RDD(jrdd, self.ctx, BatchedSerializer(outputSerializer))
rdd.partitioner = partitioner
return rdd
# TODO: add control over map-side aggregation
def combineByKey(
self,
createCombiner,
mergeValue,
mergeCombiners,
numPartitions=None,
partitionFunc=portable_hash,
):
"""
Generic function to combine the elements for each key using a custom
set of aggregation functions.
Turns an RDD[(K, V)] into a result of type RDD[(K, C)], for a "combined
type" C.
Users provide three functions:
- `createCombiner`, which turns a V into a C (e.g., creates
a one-element list)
- `mergeValue`, to merge a V into a C (e.g., adds it to the end of
a list)
- `mergeCombiners`, to combine two C's into a single one (e.g., merges
the lists)
To avoid memory allocation, both mergeValue and mergeCombiners are allowed to
modify and return their first argument instead of creating a new C.
In addition, users can control the partitioning of the output RDD.
Notes
-----
V and C can be different -- for example, one might group an RDD of type
(Int, Int) into an RDD of type (Int, List[Int]).
Examples
--------
>>> x = sc.parallelize([("a", 1), ("b", 1), ("a", 2)])
>>> def to_list(a):
... return [a]
...
>>> def append(a, b):
... a.append(b)
... return a
...
>>> def extend(a, b):
... a.extend(b)
... return a
...
>>> sorted(x.combineByKey(to_list, append, extend).collect())
[('a', [1, 2]), ('b', [1])]
"""
if numPartitions is None:
numPartitions = self._defaultReducePartitions()
serializer = self.ctx.serializer
memory = self._memory_limit()
agg = Aggregator(createCombiner, mergeValue, mergeCombiners)
def combineLocally(iterator):
merger = ExternalMerger(agg, memory * 0.9, serializer)
merger.mergeValues(iterator)
return merger.items()
locally_combined = self.mapPartitions(combineLocally, preservesPartitioning=True)
shuffled = locally_combined.partitionBy(numPartitions, partitionFunc)
def _mergeCombiners(iterator):
merger = ExternalMerger(agg, memory, serializer)
merger.mergeCombiners(iterator)
return merger.items()
return shuffled.mapPartitions(_mergeCombiners, preservesPartitioning=True)
def aggregateByKey(
self, zeroValue, seqFunc, combFunc, numPartitions=None, partitionFunc=portable_hash
):
"""
Aggregate the values of each key, using given combine functions and a neutral
"zero value". This function can return a different result type, U, than the type
of the values in this RDD, V. Thus, we need one operation for merging a V into
a U and one operation for merging two U's. The former operation is used for merging
values within a partition, and the latter is used for merging values between
partitions. To avoid memory allocation, both of these functions are
allowed to modify and return their first argument instead of creating a new U.
"""
def createZero():
return copy.deepcopy(zeroValue)
return self.combineByKey(
lambda v: seqFunc(createZero(), v), seqFunc, combFunc, numPartitions, partitionFunc
)
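# Illustrative sketch (added for clarity, not part of the original file): a per-key
# (sum, count) aggregation, assuming a live SparkContext `sc`:
#   rdd = sc.parallelize([("a", 1), ("b", 1), ("a", 2)])
#   seq = lambda acc, v: (acc[0] + v, acc[1] + 1)
#   comb = lambda a, b: (a[0] + b[0], a[1] + b[1])
#   sorted(rdd.aggregateByKey((0, 0), seq, comb).collect())
#   # -> [('a', (3, 2)), ('b', (1, 1))]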
def foldByKey(self, zeroValue, func, numPartitions=None, partitionFunc=portable_hash):
"""
Merge the values for each key using an associative function "func"
and a neutral "zeroValue" which may be added to the result an
arbitrary number of times, and must not change the result
(e.g., 0 for addition, or 1 for multiplication).
Examples
--------
>>> rdd = sc.parallelize([("a", 1), ("b", 1), ("a", 1)])
>>> from operator import add
>>> sorted(rdd.foldByKey(0, add).collect())
[('a', 2), ('b', 1)]
"""
def createZero():
return copy.deepcopy(zeroValue)
return self.combineByKey(
lambda v: func(createZero(), v), func, func, numPartitions, partitionFunc
)
def _memory_limit(self):
return _parse_memory(self.ctx._conf.get("spark.python.worker.memory", "512m"))
# TODO: support variant with custom partitioner
def groupByKey(self, numPartitions=None, partitionFunc=portable_hash):
"""
Group the values for each key in the RDD into a single sequence.
Hash-partitions the resulting RDD with numPartitions partitions.
Notes
-----
If you are grouping in order to perform an aggregation (such as a
sum or average) over each key, using reduceByKey or aggregateByKey will
provide much better performance.
Examples
--------
>>> rdd = sc.parallelize([("a", 1), ("b", 1), ("a", 1)])
>>> sorted(rdd.groupByKey().mapValues(len).collect())
[('a', 2), ('b', 1)]
>>> sorted(rdd.groupByKey().mapValues(list).collect())
[('a', [1, 1]), ('b', [1])]
"""
def createCombiner(x):
return [x]
def mergeValue(xs, x):
xs.append(x)
return xs
def mergeCombiners(a, b):
a.extend(b)
return a
memory = self._memory_limit()
serializer = self._jrdd_deserializer
agg = Aggregator(createCombiner, mergeValue, mergeCombiners)
def combine(iterator):
merger = ExternalMerger(agg, memory * 0.9, serializer)
merger.mergeValues(iterator)
return merger.items()
locally_combined = self.mapPartitions(combine, preservesPartitioning=True)
shuffled = locally_combined.partitionBy(numPartitions, partitionFunc)
def groupByKey(it):
merger = ExternalGroupBy(agg, memory, serializer)
merger.mergeCombiners(it)
return merger.items()
return shuffled.mapPartitions(groupByKey, True).mapValues(ResultIterable)
def flatMapValues(self, f):
"""
Pass each value in the key-value pair RDD through a flatMap function
without changing the keys; this also retains the original RDD's
partitioning.
Examples
--------
>>> x = sc.parallelize([("a", ["x", "y", "z"]), ("b", ["p", "r"])])
>>> def f(x): return x
>>> x.flatMapValues(f).collect()
[('a', 'x'), ('a', 'y'), ('a', 'z'), ('b', 'p'), ('b', 'r')]
"""
def flat_map_fn(kv):
return ((kv[0], x) for x in f(kv[1]))
return self.flatMap(flat_map_fn, preservesPartitioning=True)
def mapValues(self, f):
"""
Pass each value in the key-value pair RDD through a map function
without changing the keys; this also retains the original RDD's
partitioning.
Examples
--------
>>> x = sc.parallelize([("a", ["apple", "banana", "lemon"]), ("b", ["grapes"])])
>>> def f(x): return len(x)
>>> x.mapValues(f).collect()
[('a', 3), ('b', 1)]
"""
def map_values_fn(kv):
return kv[0], f(kv[1])
return self.map(map_values_fn, preservesPartitioning=True)
def groupWith(self, other, *others):
"""
Alias for cogroup but with support for multiple RDDs.
Examples
--------
>>> w = sc.parallelize([("a", 5), ("b", 6)])
>>> x = sc.parallelize([("a", 1), ("b", 4)])
>>> y = sc.parallelize([("a", 2)])
>>> z = sc.parallelize([("b", 42)])
>>> [(x, tuple(map(list, y))) for x, y in sorted(list(w.groupWith(x, y, z).collect()))]
[('a', ([5], [1], [2], [])), ('b', ([6], [4], [], [42]))]
"""
return python_cogroup((self, other) + others, numPartitions=None)
# TODO: add variant with custom partitioner
def cogroup(self, other, numPartitions=None):
"""
For each key k in `self` or `other`, return a resulting RDD that
contains a tuple with the list of values for that key in `self` as
well as `other`.
Examples
--------
>>> x = sc.parallelize([("a", 1), ("b", 4)])
>>> y = sc.parallelize([("a", 2)])
>>> [(x, tuple(map(list, y))) for x, y in sorted(list(x.cogroup(y).collect()))]
[('a', ([1], [2])), ('b', ([4], []))]
"""
return python_cogroup((self, other), numPartitions)
def sampleByKey(self, withReplacement, fractions, seed=None):
"""
Return a subset of this RDD sampled by key (via stratified sampling).
Create a sample of this RDD using variable sampling rates for
different keys as specified by fractions, a key to sampling rate map.
Examples
--------
>>> fractions = {"a": 0.2, "b": 0.1}
>>> rdd = sc.parallelize(fractions.keys()).cartesian(sc.parallelize(range(0, 1000)))
>>> sample = dict(rdd.sampleByKey(False, fractions, 2).groupByKey().collect())
>>> 100 < len(sample["a"]) < 300 and 50 < len(sample["b"]) < 150
True
>>> max(sample["a"]) <= 999 and min(sample["a"]) >= 0
True
>>> max(sample["b"]) <= 999 and min(sample["b"]) >= 0
True
"""
for fraction in fractions.values():
assert fraction >= 0.0, "Negative fraction value: %s" % fraction
return self.mapPartitionsWithIndex(
RDDStratifiedSampler(withReplacement, fractions, seed).func, True
)
def subtractByKey(self, other, numPartitions=None):
"""
Return each (key, value) pair in `self` that has no pair with matching
key in `other`.
Examples
--------
>>> x = sc.parallelize([("a", 1), ("b", 4), ("b", 5), ("a", 2)])
>>> y = sc.parallelize([("a", 3), ("c", None)])
>>> sorted(x.subtractByKey(y).collect())
[('b', 4), ('b', 5)]
"""
def filter_func(pair):
key, (val1, val2) = pair
return val1 and not val2
return self.cogroup(other, numPartitions).filter(filter_func).flatMapValues(lambda x: x[0])
def subtract(self, other, numPartitions=None):
"""
Return each value in `self` that is not contained in `other`.
Examples
--------
>>> x = sc.parallelize([("a", 1), ("b", 4), ("b", 5), ("a", 3)])
>>> y = sc.parallelize([("a", 3), ("c", None)])
>>> sorted(x.subtract(y).collect())
[('a', 1), ('b', 4), ('b', 5)]
"""
# note: here 'True' is just a placeholder
rdd = other.map(lambda x: (x, True))
return self.map(lambda x: (x, True)).subtractByKey(rdd, numPartitions).keys()
def keyBy(self, f):
"""
Creates tuples of the elements in this RDD by applying `f`.
Examples
--------
>>> x = sc.parallelize(range(0,3)).keyBy(lambda x: x*x)
>>> y = sc.parallelize(zip(range(0,5), range(0,5)))
>>> [(x, list(map(list, y))) for x, y in sorted(x.cogroup(y).collect())]
[(0, [[0], [0]]), (1, [[1], [1]]), (2, [[], [2]]), (3, [[], [3]]), (4, [[2], [4]])]
"""
return self.map(lambda x: (f(x), x))
def repartition(self, numPartitions):
"""
Return a new RDD that has exactly numPartitions partitions.
Can increase or decrease the level of parallelism in this RDD.
Internally, this uses a shuffle to redistribute data.
If you are decreasing the number of partitions in this RDD, consider
using `coalesce`, which can avoid performing a shuffle.
Examples
--------
>>> rdd = sc.parallelize([1,2,3,4,5,6,7], 4)
>>> sorted(rdd.glom().collect())
[[1], [2, 3], [4, 5], [6, 7]]
>>> len(rdd.repartition(2).glom().collect())
2
>>> len(rdd.repartition(10).glom().collect())
10
"""
return self.coalesce(numPartitions, shuffle=True)
def coalesce(self, numPartitions, shuffle=False):
"""
Return a new RDD that is reduced into `numPartitions` partitions.
Examples
--------
>>> sc.parallelize([1, 2, 3, 4, 5], 3).glom().collect()
[[1], [2, 3], [4, 5]]
>>> sc.parallelize([1, 2, 3, 4, 5], 3).coalesce(1).glom().collect()
[[1, 2, 3, 4, 5]]
"""
if shuffle:
# Decrease the batch size in order to distribute evenly the elements across output
# partitions. Otherwise, repartition will possibly produce highly skewed partitions.
batchSize = min(10, self.ctx._batchSize or 1024)
ser = BatchedSerializer(CPickleSerializer(), batchSize)
selfCopy = self._reserialize(ser)
jrdd_deserializer = selfCopy._jrdd_deserializer
jrdd = selfCopy._jrdd.coalesce(numPartitions, shuffle)
else:
jrdd_deserializer = self._jrdd_deserializer
jrdd = self._jrdd.coalesce(numPartitions, shuffle)
return RDD(jrdd, self.ctx, jrdd_deserializer)
def zip(self, other):
"""
Zips this RDD with another one, returning key-value pairs with the
first element in each RDD, second element in each RDD, etc. Assumes
that the two RDDs have the same number of partitions and the same
number of elements in each partition (e.g. one was made through
a map on the other).
Examples
--------
>>> x = sc.parallelize(range(0,5))
>>> y = sc.parallelize(range(1000, 1005))
>>> x.zip(y).collect()
[(0, 1000), (1, 1001), (2, 1002), (3, 1003), (4, 1004)]
"""
def get_batch_size(ser):
if isinstance(ser, BatchedSerializer):
return ser.batchSize
return 1 # not batched
def batch_as(rdd, batchSize):
return rdd._reserialize(BatchedSerializer(CPickleSerializer(), batchSize))
my_batch = get_batch_size(self._jrdd_deserializer)
other_batch = get_batch_size(other._jrdd_deserializer)
if my_batch != other_batch or not my_batch:
# use the smallest batchSize for both of them
batchSize = min(my_batch, other_batch)
if batchSize <= 0:
# auto batched or unlimited
batchSize = 100
other = batch_as(other, batchSize)
self = batch_as(self, batchSize)
if self.getNumPartitions() != other.getNumPartitions():
raise ValueError("Can only zip with RDD which has the same number of partitions")
# There will be an Exception in JVM if there are different number
# of items in each partitions.
pairRDD = self._jrdd.zip(other._jrdd)
deserializer = PairDeserializer(self._jrdd_deserializer, other._jrdd_deserializer)
return RDD(pairRDD, self.ctx, deserializer)
def zipWithIndex(self):
"""
Zips this RDD with its element indices.
The ordering is first based on the partition index and then the
ordering of items within each partition. So the first item in
the first partition gets index 0, and the last item in the last
partition receives the largest index.
This method needs to trigger a spark job when this RDD contains
more than one partitions.
Examples
--------
>>> sc.parallelize(["a", "b", "c", "d"], 3).zipWithIndex().collect()
[('a', 0), ('b', 1), ('c', 2), ('d', 3)]
"""
starts = [0]
if self.getNumPartitions() > 1:
nums = self.mapPartitions(lambda it: [sum(1 for i in it)]).collect()
for i in range(len(nums) - 1):
starts.append(starts[-1] + nums[i])
def func(k, it):
for i, v in enumerate(it, starts[k]):
yield v, i
return self.mapPartitionsWithIndex(func)
def zipWithUniqueId(self):
"""
Zips this RDD with generated unique Long ids.
Items in the kth partition will get ids k, n+k, 2*n+k, ..., where
n is the number of partitions. So there may exist gaps, but this
method won't trigger a spark job, which is different from
:meth:`zipWithIndex`.
Examples
--------
>>> sc.parallelize(["a", "b", "c", "d", "e"], 3).zipWithUniqueId().collect()
[('a', 0), ('b', 1), ('c', 4), ('d', 2), ('e', 5)]
"""
n = self.getNumPartitions()
def func(k, it):
for i, v in enumerate(it):
yield v, i * n + k
return self.mapPartitionsWithIndex(func)
def name(self):
"""
Return the name of this RDD.
"""
n = self._jrdd.name()
if n:
return n
def setName(self, name):
"""
Assign a name to this RDD.
Examples
--------
>>> rdd1 = sc.parallelize([1, 2])
>>> rdd1.setName('RDD1').name()
'RDD1'
"""
self._jrdd.setName(name)
return self
def toDebugString(self):
"""
A description of this RDD and its recursive dependencies for debugging.
"""
debug_string = self._jrdd.toDebugString()
if debug_string:
return debug_string.encode("utf-8")
def getStorageLevel(self):
"""
Get the RDD's current storage level.
Examples
--------
>>> rdd1 = sc.parallelize([1,2])
>>> rdd1.getStorageLevel()
StorageLevel(False, False, False, False, 1)
>>> print(rdd1.getStorageLevel())
Serialized 1x Replicated
"""
java_storage_level = self._jrdd.getStorageLevel()
storage_level = StorageLevel(
java_storage_level.useDisk(),
java_storage_level.useMemory(),
java_storage_level.useOffHeap(),
java_storage_level.deserialized(),
java_storage_level.replication(),
)
return storage_level
def _defaultReducePartitions(self):
"""
Returns the default number of partitions to use during reduce tasks (e.g., groupBy).
If spark.default.parallelism is set, then we'll use the value from SparkContext
defaultParallelism, otherwise we'll use the number of partitions in this RDD.
This mirrors the behavior of the Scala Partitioner#defaultPartitioner, intended to reduce
the likelihood of OOMs. Once PySpark adopts Partitioner-based APIs, this behavior will
be inherent.
"""
if self.ctx._conf.contains("spark.default.parallelism"):
return self.ctx.defaultParallelism
else:
return self.getNumPartitions()
def lookup(self, key):
"""
Return the list of values in the RDD for key `key`. This operation
is done efficiently if the RDD has a known partitioner by only
searching the partition that the key maps to.
Examples
--------
>>> l = range(1000)
>>> rdd = sc.parallelize(zip(l, l), 10)
>>> rdd.lookup(42) # slow
[42]
>>> sorted = rdd.sortByKey()
>>> sorted.lookup(42) # fast
[42]
>>> sorted.lookup(1024)
[]
>>> rdd2 = sc.parallelize([(('a', 'b'), 'c')]).groupByKey()
>>> list(rdd2.lookup(('a', 'b'))[0])
['c']
"""
values = self.filter(lambda kv: kv[0] == key).values()
if self.partitioner is not None:
return self.ctx.runJob(values, lambda x: x, [self.partitioner(key)])
return values.collect()
def _to_java_object_rdd(self):
"""Return a JavaRDD of Object by unpickling
It will convert each Python object into a Java object by Pickle, whether the
RDD is serialized in batch or not.
"""
rdd = self._pickled()
return self.ctx._jvm.SerDeUtil.pythonToJava(rdd._jrdd, True)
def countApprox(self, timeout, confidence=0.95):
"""
Approximate version of count() that returns a potentially incomplete
result within a timeout, even if not all tasks have finished.
Examples
--------
>>> rdd = sc.parallelize(range(1000), 10)
>>> rdd.countApprox(1000, 1.0)
1000
"""
drdd = self.mapPartitions(lambda it: [float(sum(1 for i in it))])
return int(drdd.sumApprox(timeout, confidence))
def sumApprox(self, timeout, confidence=0.95):
"""
Approximate operation to return the sum within a timeout
or meet the confidence.
Examples
--------
>>> rdd = sc.parallelize(range(1000), 10)
>>> r = sum(range(1000))
>>> abs(rdd.sumApprox(1000) - r) / r < 0.05
True
"""
jrdd = self.mapPartitions(lambda it: [float(sum(it))])._to_java_object_rdd()
jdrdd = self.ctx._jvm.JavaDoubleRDD.fromRDD(jrdd.rdd())
r = jdrdd.sumApprox(timeout, confidence).getFinalValue()
return BoundedFloat(r.mean(), r.confidence(), r.low(), r.high())
def meanApprox(self, timeout, confidence=0.95):
"""
Approximate operation to return the mean within a timeout
or meet the confidence.
Examples
--------
>>> rdd = sc.parallelize(range(1000), 10)
>>> r = sum(range(1000)) / 1000.0
>>> abs(rdd.meanApprox(1000) - r) / r < 0.05
True
"""
jrdd = self.map(float)._to_java_object_rdd()
jdrdd = self.ctx._jvm.JavaDoubleRDD.fromRDD(jrdd.rdd())
r = jdrdd.meanApprox(timeout, confidence).getFinalValue()
return BoundedFloat(r.mean(), r.confidence(), r.low(), r.high())
def countApproxDistinct(self, relativeSD=0.05):
"""
Return approximate number of distinct elements in the RDD.
Parameters
----------
relativeSD : float, optional
Relative accuracy. Smaller values create
counters that require more space.
It must be greater than 0.000017.
Notes
-----
The algorithm used is based on streamlib's implementation of
`"HyperLogLog in Practice: Algorithmic Engineering of a State
of The Art Cardinality Estimation Algorithm", available here
<https://doi.org/10.1145/2452376.2452456>`_.
Examples
--------
>>> n = sc.parallelize(range(1000)).map(str).countApproxDistinct()
>>> 900 < n < 1100
True
>>> n = sc.parallelize([i % 20 for i in range(1000)]).countApproxDistinct()
>>> 16 < n < 24
True
"""
if relativeSD < 0.000017:
raise ValueError("relativeSD should be greater than 0.000017")
# the hash space in Java is 2^32
hashRDD = self.map(lambda x: portable_hash(x) & 0xFFFFFFFF)
return hashRDD._to_java_object_rdd().countApproxDistinct(relativeSD)
def toLocalIterator(self, prefetchPartitions=False):
"""
Return an iterator that contains all of the elements in this RDD.
The iterator will consume as much memory as the largest partition in this RDD.
With prefetch it may consume up to the memory of the 2 largest partitions.
Parameters
----------
prefetchPartitions : bool, optional
If Spark should pre-fetch the next partition
before it is needed.
Examples
--------
>>> rdd = sc.parallelize(range(10))
>>> [x for x in rdd.toLocalIterator()]
[0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
"""
with SCCallSiteSync(self.context):
sock_info = self.ctx._jvm.PythonRDD.toLocalIteratorAndServe(
self._jrdd.rdd(), prefetchPartitions
)
return _local_iterator_from_socket(sock_info, self._jrdd_deserializer)
def barrier(self):
"""
Marks the current stage as a barrier stage, where Spark must launch all tasks together.
In case of a task failure, instead of only restarting the failed task, Spark will abort the
entire stage and relaunch all tasks for this stage.
The barrier execution mode feature is experimental and it only handles limited scenarios.
Please read the linked SPIP and design docs to understand the limitations and future plans.
.. versionadded:: 2.4.0
Returns
-------
:class:`RDDBarrier`
instance that provides actions within a barrier stage.
See Also
--------
pyspark.BarrierTaskContext
Notes
-----
For additional information see
- `SPIP: Barrier Execution Mode <http://jira.apache.org/jira/browse/SPARK-24374>`_
- `Design Doc <https://jira.apache.org/jira/browse/SPARK-24582>`_
This API is experimental
"""
return RDDBarrier(self)
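# Illustrative sketch (added for clarity, not part of the original file): running
# a per-partition computation as one barrier stage, assuming a SparkContext `sc`
# with enough task slots for all partitions:
#   rdd = sc.parallelize(range(8), 4)
#   rdd.barrier().mapPartitions(lambda it: [sum(it)]).collect()
#   # -> one partial sum per partition, with all tasks launched together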
def _is_barrier(self):
"""
Whether this RDD is in a barrier stage.
"""
return self._jrdd.rdd().isBarrier()
def withResources(self, profile):
"""
Specify a :class:`pyspark.resource.ResourceProfile` to use when calculating this RDD.
This is only supported on certain cluster managers and currently requires dynamic
allocation to be enabled. It will result in new executors with the resources specified
being acquired to calculate the RDD.
.. versionadded:: 3.1.0
Notes
-----
This API is experimental
"""
self.has_resource_profile = True
if profile._java_resource_profile is not None:
jrp = profile._java_resource_profile
else:
builder = self.ctx._jvm.org.apache.spark.resource.ResourceProfileBuilder()
ereqs = ExecutorResourceRequests(self.ctx._jvm, profile._executor_resource_requests)
treqs = TaskResourceRequests(self.ctx._jvm, profile._task_resource_requests)
builder.require(ereqs._java_executor_resource_requests)
builder.require(treqs._java_task_resource_requests)
jrp = builder.build()
self._jrdd.withResources(jrp)
return self
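# Illustrative sketch (assumptions: the builder API from pyspark.resource and a
# cluster manager with dynamic allocation enabled; not part of the original file):
#   from pyspark.resource import ExecutorResourceRequests, ResourceProfileBuilder
#   ereqs = ExecutorResourceRequests().cores(2).memory("2g")
#   rp = ResourceProfileBuilder().require(ereqs).build
#   rdd.withResources(rp)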
def getResourceProfile(self):
"""
Get the :class:`pyspark.resource.ResourceProfile` specified with this RDD or None
if it wasn't specified.
.. versionadded:: 3.1.0
Returns
-------
:py:class:`pyspark.resource.ResourceProfile`
The user specified profile or None if none were specified
Notes
-----
This API is experimental
"""
rp = self._jrdd.getResourceProfile()
if rp is not None:
return ResourceProfile(_java_resource_profile=rp)
else:
return None
def _prepare_for_python_RDD(sc, command):
# the serialized command will be compressed by broadcast
ser = CloudPickleSerializer()
pickled_command = ser.dumps(command)
if len(pickled_command) > sc._jvm.PythonUtils.getBroadcastThreshold(sc._jsc): # Default 1M
# The broadcast will have same life cycle as created PythonRDD
broadcast = sc.broadcast(pickled_command)
pickled_command = ser.dumps(broadcast)
broadcast_vars = [x._jbroadcast for x in sc._pickled_broadcast_vars]
sc._pickled_broadcast_vars.clear()
return pickled_command, broadcast_vars, sc.environment, sc._python_includes
def _wrap_function(sc, func, deserializer, serializer, profiler=None):
assert deserializer, "deserializer should not be empty"
assert serializer, "serializer should not be empty"
command = (func, profiler, deserializer, serializer)
pickled_command, broadcast_vars, env, includes = _prepare_for_python_RDD(sc, command)
return sc._jvm.PythonFunction(
bytearray(pickled_command),
env,
includes,
sc.pythonExec,
sc.pythonVer,
broadcast_vars,
sc._javaAccumulator,
)
class RDDBarrier:
"""
Wraps an RDD in a barrier stage, which forces Spark to launch tasks of this stage together.
:class:`RDDBarrier` instances are created by :func:`RDD.barrier`.
.. versionadded:: 2.4.0
Notes
-----
This API is experimental
"""
def __init__(self, rdd):
self.rdd = rdd
def mapPartitions(self, f, preservesPartitioning=False):
"""
Returns a new RDD by applying a function to each partition of the wrapped RDD,
where tasks are launched together in a barrier stage.
The interface is the same as :func:`RDD.mapPartitions`.
Please see the API doc there.
.. versionadded:: 2.4.0
Notes
-----
This API is experimental
"""
def func(s, iterator):
return f(iterator)
return PipelinedRDD(self.rdd, func, preservesPartitioning, isFromBarrier=True)
def mapPartitionsWithIndex(self, f, preservesPartitioning=False):
"""
Returns a new RDD by applying a function to each partition of the wrapped RDD, while
tracking the index of the original partition. And all tasks are launched together
in a barrier stage.
The interface is the same as :func:`RDD.mapPartitionsWithIndex`.
Please see the API doc there.
.. versionadded:: 3.0.0
Notes
-----
This API is experimental
"""
return PipelinedRDD(self.rdd, f, preservesPartitioning, isFromBarrier=True)
class PipelinedRDD(RDD):
"""
Examples
--------
Pipelined maps:
>>> rdd = sc.parallelize([1, 2, 3, 4])
>>> rdd.map(lambda x: 2 * x).cache().map(lambda x: 2 * x).collect()
[4, 8, 12, 16]
>>> rdd.map(lambda x: 2 * x).map(lambda x: 2 * x).collect()
[4, 8, 12, 16]
Pipelined reduces:
>>> from operator import add
>>> rdd.map(lambda x: 2 * x).reduce(add)
20
>>> rdd.flatMap(lambda x: [x, x]).reduce(add)
20
"""
def __init__(self, prev, func, preservesPartitioning=False, isFromBarrier=False):
if not isinstance(prev, PipelinedRDD) or not prev._is_pipelinable():
# This transformation is the first in its stage:
self.func = func
self.preservesPartitioning = preservesPartitioning
self._prev_jrdd = prev._jrdd
self._prev_jrdd_deserializer = prev._jrdd_deserializer
else:
prev_func = prev.func
def pipeline_func(split, iterator):
return func(split, prev_func(split, iterator))
self.func = pipeline_func
self.preservesPartitioning = prev.preservesPartitioning and preservesPartitioning
self._prev_jrdd = prev._prev_jrdd # maintain the pipeline
self._prev_jrdd_deserializer = prev._prev_jrdd_deserializer
self.is_cached = False
self.has_resource_profile = False
self.is_checkpointed = False
self.ctx = prev.ctx
self.prev = prev
self._jrdd_val = None
self._id = None
self._jrdd_deserializer = self.ctx.serializer
self._bypass_serializer = False
self.partitioner = prev.partitioner if self.preservesPartitioning else None
self.is_barrier = isFromBarrier or prev._is_barrier()
def getNumPartitions(self):
return self._prev_jrdd.partitions().size()
@property
def _jrdd(self):
if self._jrdd_val:
return self._jrdd_val
if self._bypass_serializer:
self._jrdd_deserializer = NoOpSerializer()
if self.ctx.profiler_collector:
profiler = self.ctx.profiler_collector.new_profiler(self.ctx)
else:
profiler = None
wrapped_func = _wrap_function(
self.ctx, self.func, self._prev_jrdd_deserializer, self._jrdd_deserializer, profiler
)
python_rdd = self.ctx._jvm.PythonRDD(
self._prev_jrdd.rdd(), wrapped_func, self.preservesPartitioning, self.is_barrier
)
self._jrdd_val = python_rdd.asJavaRDD()
if profiler:
self._id = self._jrdd_val.id()
self.ctx.profiler_collector.add_profiler(self._id, profiler)
return self._jrdd_val
def id(self):
if self._id is None:
self._id = self._jrdd.id()
return self._id
def _is_pipelinable(self):
return not (self.is_cached or self.is_checkpointed or self.has_resource_profile)
def _is_barrier(self):
return self.is_barrier
def _test():
import doctest
from pyspark.context import SparkContext
globs = globals().copy()
# The small batch size here ensures that we see multiple batches,
# even in these small test examples:
globs["sc"] = SparkContext("local[4]", "PythonTest")
(failure_count, test_count) = doctest.testmod(globs=globs, optionflags=doctest.ELLIPSIS)
globs["sc"].stop()
if failure_count:
sys.exit(-1)
if __name__ == "__main__":
_test()
| {
"content_hash": "3e5fce909d7002d3e199b88271b1f875",
"timestamp": "",
"source": "github",
"line_count": 3097,
"max_line_length": 100,
"avg_line_length": 35.00968679367129,
"alnum_prop": 0.5672400276688956,
"repo_name": "xuanyuanking/spark",
"id": "97b87ea87e8348e6e5e7ca10c5111b8bdff3a73d",
"size": "109210",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "python/pyspark/rdd.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "ANTLR",
"bytes": "54336"
},
{
"name": "Batchfile",
"bytes": "27405"
},
{
"name": "C",
"bytes": "1493"
},
{
"name": "CSS",
"bytes": "26221"
},
{
"name": "Dockerfile",
"bytes": "9711"
},
{
"name": "HTML",
"bytes": "42080"
},
{
"name": "HiveQL",
"bytes": "1872438"
},
{
"name": "Java",
"bytes": "4519872"
},
{
"name": "JavaScript",
"bytes": "222664"
},
{
"name": "Jupyter Notebook",
"bytes": "4310516"
},
{
"name": "Makefile",
"bytes": "2374"
},
{
"name": "PLpgSQL",
"bytes": "352963"
},
{
"name": "PowerShell",
"bytes": "4221"
},
{
"name": "Python",
"bytes": "7388289"
},
{
"name": "R",
"bytes": "1272682"
},
{
"name": "ReScript",
"bytes": "240"
},
{
"name": "Roff",
"bytes": "31791"
},
{
"name": "Scala",
"bytes": "40053974"
},
{
"name": "Shell",
"bytes": "230591"
},
{
"name": "Thrift",
"bytes": "2016"
},
{
"name": "q",
"bytes": "98156"
}
],
"symlink_target": ""
} |
from geopy.geocoders import AzureMaps
from test.geocoders.tomtom import BaseTestTomTom
from test.geocoders.util import env
class TestAzureMaps(BaseTestTomTom):
@classmethod
def make_geocoder(cls, **kwargs):
return AzureMaps(env['AZURE_SUBSCRIPTION_KEY'], timeout=3,
**kwargs)
| {
"content_hash": "06f8646eee6ac7eb340cd0c8ca4820f2",
"timestamp": "",
"source": "github",
"line_count": 11,
"max_line_length": 66,
"avg_line_length": 29.09090909090909,
"alnum_prop": 0.703125,
"repo_name": "geopy/geopy",
"id": "5ea6931c26a5c836f330aa1a3f592844a3203b00",
"size": "320",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "test/geocoders/azure.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "1574"
},
{
"name": "Python",
"bytes": "565428"
}
],
"symlink_target": ""
} |
import os
import webob.static
import ryu.base
import ryu.app.wsgi
class Application(ryu.base.app_manager.RyuApp):
_CONTEXTS = {
'wsgi': ryu.app.wsgi.WSGIApplication,
}
def __init__(self, *args, **kwargs):
super(Application, self).__init__(*args, **kwargs)
kwargs['wsgi'].register(Controller)
class Controller(ryu.app.wsgi.ControllerBase):
def __init__(self, req, link, data, **config):
super(Controller, self).__init__(req, link, data, **config)
self.path = '%s/html/' % os.path.dirname(__file__)
self.app = webob.static.DirectoryApp(self.path)
@ryu.app.wsgi.route('oftgui', '/oftgui/{file:.*}')
def handle_static(self, req, **kwargs):
if kwargs['file']:
req.path_info = kwargs['file']
return self.app(req)
ryu.base.app_manager.require_app('oftroute.flow')
ryu.base.app_manager.require_app('oftroute.trace')
ryu.base.app_manager.require_app('ryu.app.rest_topology')
ryu.base.app_manager.require_app('ryu.app.ws_topology')
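# Illustrative launch sketch (not part of the original module): ryu-manager pulls
# in the apps required above; topology discovery needs --observe-links, e.g.
#   ryu-manager --observe-links oftgui.server
# after which the static handler above serves the GUI under /oftgui/.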
| {
"content_hash": "e569ebe66ebc408a5bc370002f3c2d78",
"timestamp": "",
"source": "github",
"line_count": 35,
"max_line_length": 67,
"avg_line_length": 29.485714285714284,
"alnum_prop": 0.6424418604651163,
"repo_name": "atzm/oftgui",
"id": "014c1f2473998ce7147f08d4bb401b28586cda5a",
"size": "1057",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "oftgui/server.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "1324"
},
{
"name": "HTML",
"bytes": "2840"
},
{
"name": "JavaScript",
"bytes": "35897"
},
{
"name": "Python",
"bytes": "2303"
}
],
"symlink_target": ""
} |
from robot import model, utils
from keyword import Keyword
class TestCase(model.TestCase):
"""Results of a single test case."""
__slots__ = ['status', 'message', 'starttime', 'endtime']
keyword_class = Keyword
def __init__(self, name='', doc='', tags=None, timeout=None, status='FAIL',
message='', starttime=None, endtime=None):
model.TestCase.__init__(self, name, doc, tags, timeout)
#: String 'PASS' or 'FAIL'.
self.status = status
#: Possible failure message.
self.message = message
#: Test case execution start time in format ``%Y%m%d %H:%M:%S.%f``.
self.starttime = starttime
#: Test case execution end time in format ``%Y%m%d %H:%M:%S.%f``.
self.endtime = endtime
@property
def elapsedtime(self):
"""Elapsed execution time of the test case in milliseconds."""
return utils.get_elapsed_time(self.starttime, self.endtime)
@property
def passed(self):
"""``True`` if the test case did pass, ``False`` otherwise."""
return self.status == 'PASS'
@property
def critical(self):
"""``True`` if the test case is marked as critical,
``False`` otherwise.
"""
if not self.parent:
return True
return self.parent.criticality.test_is_critical(self)
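# Illustrative sketch (not part of the original module): result TestCase objects
# are usually reached through a parsed output file, e.g.
#   from robot.api import ExecutionResult
#   result = ExecutionResult('output.xml')
#   for test in result.suite.tests:
#       print(test.name, test.status, test.elapsedtime)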
| {
"content_hash": "aaabff64f59c9bb75893b0be3789c352",
"timestamp": "",
"source": "github",
"line_count": 40,
"max_line_length": 79,
"avg_line_length": 33.975,
"alnum_prop": 0.5916114790286976,
"repo_name": "ktan2020/legacy-automation",
"id": "c84f125b8deffce124941bec616c4cb7c77508ab",
"size": "1965",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "win/Lib/site-packages/robot/result/testcase.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "ActionScript",
"bytes": "913"
},
{
"name": "Ada",
"bytes": "289"
},
{
"name": "Assembly",
"bytes": "687"
},
{
"name": "Boo",
"bytes": "540"
},
{
"name": "C",
"bytes": "40116"
},
{
"name": "C#",
"bytes": "474"
},
{
"name": "C++",
"bytes": "393"
},
{
"name": "CSS",
"bytes": "70883"
},
{
"name": "ColdFusion",
"bytes": "1012"
},
{
"name": "Common Lisp",
"bytes": "1034"
},
{
"name": "D",
"bytes": "1858"
},
{
"name": "Eiffel",
"bytes": "426"
},
{
"name": "Erlang",
"bytes": "9243"
},
{
"name": "FORTRAN",
"bytes": "1810"
},
{
"name": "Forth",
"bytes": "182"
},
{
"name": "Groovy",
"bytes": "2366"
},
{
"name": "Haskell",
"bytes": "816"
},
{
"name": "Haxe",
"bytes": "455"
},
{
"name": "Java",
"bytes": "1155"
},
{
"name": "JavaScript",
"bytes": "69444"
},
{
"name": "Lua",
"bytes": "795"
},
{
"name": "Matlab",
"bytes": "1278"
},
{
"name": "OCaml",
"bytes": "350"
},
{
"name": "Objective-C++",
"bytes": "885"
},
{
"name": "PHP",
"bytes": "1411"
},
{
"name": "Pascal",
"bytes": "388"
},
{
"name": "Perl",
"bytes": "252651"
},
{
"name": "Pike",
"bytes": "589"
},
{
"name": "Python",
"bytes": "42085780"
},
{
"name": "R",
"bytes": "1156"
},
{
"name": "Ruby",
"bytes": "480"
},
{
"name": "Scheme",
"bytes": "282"
},
{
"name": "Shell",
"bytes": "30518"
},
{
"name": "Smalltalk",
"bytes": "926"
},
{
"name": "Squirrel",
"bytes": "697"
},
{
"name": "Stata",
"bytes": "302"
},
{
"name": "SystemVerilog",
"bytes": "3145"
},
{
"name": "Tcl",
"bytes": "1039"
},
{
"name": "TeX",
"bytes": "1746"
},
{
"name": "VHDL",
"bytes": "985"
},
{
"name": "Vala",
"bytes": "664"
},
{
"name": "Verilog",
"bytes": "439"
},
{
"name": "Visual Basic",
"bytes": "2142"
},
{
"name": "XSLT",
"bytes": "152770"
},
{
"name": "ooc",
"bytes": "890"
},
{
"name": "xBase",
"bytes": "769"
}
],
"symlink_target": ""
} |
"""A utility class to write to and read from a non-blocking socket."""
from __future__ import unicode_literals
import errno
import socket
import ssl
from marrow.io import ioloop
from marrow.util.compat import exception
log = __import__('logging').getLogger(__name__)
__all__ = ['IOStream', 'SSLIOStream']
class IOStream(object):
"""A utility class to write to and read from a non-blocking socket.
We support three methods: write(), read_until(), and read_bytes().
All of the methods take callbacks (since writing and reading are
non-blocking and asynchronous). read_until() reads the socket until
a given delimiter, and read_bytes() reads until a specified number
of bytes have been read from the socket.
A very simple (and broken) HTTP client using this class:
import ioloop
import iostream
import socket
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM, 0)
s.connect(("friendfeed.com", 80))
stream = IOStream(s)
def on_headers(data):
headers = {}
for line in data.split("\r\n"):
parts = line.split(":")
if len(parts) == 2:
headers[parts[0].strip()] = parts[1].strip()
stream.read_bytes(int(headers["Content-Length"]), on_body)
def on_body(data):
print data
stream.close()
ioloop.IOLoop.instance().stop()
stream.write("GET / HTTP/1.0\r\n\r\n")
stream.read_until("\r\n\r\n", on_headers)
ioloop.IOLoop.instance().start()
"""
def __init__(self, socket, io_loop=None, max_buffer_size=104857600, read_chunk_size=4096):
self.socket = socket
self.address = socket.getpeername()
self.socket.setblocking(False)
self.io_loop = io_loop or ioloop.IOLoop.instance()
self.max_buffer_size = max_buffer_size
self.read_chunk_size = read_chunk_size
self._read_buffer = b""
self._write_buffer = b""
self._read_delimiter = None
self._read_bytes = None
self._read_callback = None
self._write_callback = None
self._close_callback = None
self._state = self.io_loop.ERROR
self.io_loop.add_handler(self.socket.fileno(), self._handle_events, self._state)
def read_until(self, delimiter, callback):
"""Call callback when we read the given delimiter."""
assert not self._read_callback, "Already reading."
loc = self._read_buffer.find(delimiter)
if loc != -1:
self._run_callback(callback, self._consume(loc + len(delimiter)))
return
# TODO: Allow multiple registered callbacks.
self._check_closed()
self._read_delimiter = delimiter
self._read_callback = callback
self._add_io_state(self.io_loop.READ)
def read_bytes(self, num_bytes, callback):
"""Call callback when we read the given number of bytes."""
assert not self._read_callback, "Already reading"
if len(self._read_buffer) >= num_bytes:
self._run_callback(callback, self._consume(num_bytes))
return
# TODO: Allow multiple registered callbacks.
self._check_closed()
self._read_bytes = num_bytes
self._read_callback = callback
self._add_io_state(self.io_loop.READ)
def write(self, data, callback=None):
"""Write the given data to this stream.
If callback is given, we call it when all of the buffered write
data has been successfully written to the stream. If there was
previously buffered write data and an old write callback, that
callback is simply overwritten with this new callback.
"""
self._check_closed()
self._write_buffer += data
self._add_io_state(self.io_loop.WRITE)
# TODO: Allow multiple registered callbacks.
self._write_callback = callback
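# Illustrative sketch (not part of the original module): writing with a completion
# callback, which fires once the buffered data has been flushed to the socket:
#   def on_written():
#       stream.read_until(b"\r\n\r\n", on_headers)
#   stream.write(b"GET / HTTP/1.0\r\n\r\n", callback=on_written)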
def set_close_callback(self, callback):
"""Call the given callback when the stream is closed."""
# TODO: Allow multiple registered callbacks.
self._close_callback = callback
# TODO: set_exception_callback
def close(self):
"""Close this stream."""
if self.socket is not None:
self.io_loop.remove_handler(self.socket.fileno())
try:
self.socket.shutdown(socket.SHUT_RDWR)
self.socket.close()
except:
pass
self.socket = None
# TODO: Allow multiple registered callbacks.
if self._close_callback:
self._run_callback(self._close_callback)
def reading(self):
"""Returns true if we are currently reading from the stream."""
return self._read_callback is not None
def writing(self):
"""Returns true if we are currently writing to the stream."""
return len(self._write_buffer) # w/o > 0 = 27% speed increase if False, 5% if True
def closed(self):
return self.socket is None
def _handle_events(self, fd, events):
if not self.socket:
log.warning("Got events for closed stream %d", fd)
return
if events & self.io_loop.READ:
self._handle_read()
if not self.socket:
return
if events & self.io_loop.WRITE:
self._handle_write()
if not self.socket:
return
if events & self.io_loop.ERROR:
self.close()
return
state = self.io_loop.ERROR
if self._read_delimiter or self._read_bytes:
state |= self.io_loop.READ
if self._write_buffer:
state |= self.io_loop.WRITE
if state != self._state:
self._state = state
self.io_loop.update_handler(self.socket.fileno(), self._state)
def _run_callback(self, callback, *args, **kwargs):
try:
callback(*args, **kwargs)
except:
# Close the socket on an uncaught exception from a user callback
# (It would eventually get closed when the socket object is
# gc'd, but we don't want to rely on gc happening before we
# run out of file descriptors)
self.close()
# Re-raise the exception so that IOLoop.handle_callback_exception
# can see it and log the error
raise
def _handle_read(self):
try:
chunk = self.socket.recv(self.read_chunk_size)
except socket.error:
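            # EWOULDBLOCK/EAGAIN mean no data is ready yet; any other error closes the stream.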
e = exception().exception
if e.args[0] in (errno.EWOULDBLOCK, errno.EAGAIN):
return
else:
log.warning("Read error on %d: %s", self.socket.fileno(), e)
self.close()
return
if not chunk:
self.close()
return
self._read_buffer += chunk
rblen = len(self._read_buffer)
if rblen >= self.max_buffer_size:
log.error("Connection %d reached maximum read buffer size.", self.socket.fileno())
self.close()
return
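        # Fire the pending callback once the buffered data satisfies the requested read.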
if self._read_bytes:
if rblen >= self._read_bytes:
num_bytes = self._read_bytes
callback = self._read_callback
self._read_callback = None
self._read_bytes = None
self._run_callback(callback, self._consume(num_bytes))
elif self._read_delimiter:
loc = self._read_buffer.find(self._read_delimiter)
if loc != -1:
callback = self._read_callback
delimiter_len = len(self._read_delimiter)
self._read_callback = None
self._read_delimiter = None
self._run_callback(callback, self._consume(loc + delimiter_len))
def _handle_write(self):
while self._write_buffer:
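            # Send at most 128 KB per call; any unsent remainder stays in the buffer for the next pass.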
try:
num_bytes = self.socket.send(self._write_buffer[:128 * 1024])
self._write_buffer = self._write_buffer[num_bytes:]
except socket.error:
e = exception().exception
if e.args[0] in (errno.EWOULDBLOCK, errno.EAGAIN):
break
else:
log.warning("Write error on %d: %s", self.socket.fileno(), e)
self.close()
return
# TODO: Allow multiple callbacks.
if not self._write_buffer and self._write_callback:
callback = self._write_callback
self._write_callback = None
self._run_callback(callback)
def _consume(self, loc):
result = self._read_buffer[:loc]
self._read_buffer = self._read_buffer[loc:]
return result
def _check_closed(self):
if not self.socket:
raise IOError("Stream is closed.")
def _add_io_state(self, state):
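        # Add READ/WRITE interest to the current event mask if it is not already registered.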
if not self._state & state:
self._state = self._state | state
self.io_loop.update_handler(self.socket.fileno(), self._state)
class SSLIOStream(IOStream):
"""Sets up an SSL connection in a non-blocking manner"""
def __init__(self, *args, **kwargs):
super(SSLIOStream, self).__init__(*args, **kwargs)
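        # Gate normal reads and writes until the non-blocking TLS handshake has completed.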
self._ssl_accepting = True
self._do_ssl_handshake()
def _do_ssl_handshake(self):
# Based on code from test_ssl.py in the python stdlib
try:
self.socket.do_handshake()
except ssl.SSLError:
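            # WANT_READ/WANT_WRITE mean the handshake needs more I/O; register interest and retry later.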
err = exception().exception
if err.args[0] == ssl.SSL_ERROR_WANT_READ:
self._add_io_state(self.io_loop.READ)
return
elif err.args[0] == ssl.SSL_ERROR_WANT_WRITE:
self._add_io_state(self.io_loop.WRITE)
return
elif err.args[0] in (ssl.SSL_ERROR_EOF, ssl.SSL_ERROR_ZERO_RETURN):
return self.close()
elif err.args[0] == ssl.SSL_ERROR_SSL:
self.close()
raise
except socket.error:
err = exception().exception
if err.args[0] == errno.ECONNABORTED:
return self.close()
else:
self._ssl_accepting = False
def _handle_read(self):
if self._ssl_accepting:
self._do_ssl_handshake()
return
super(SSLIOStream, self)._handle_read()
def _handle_write(self):
if self._ssl_accepting:
self._do_ssl_handshake()
return
super(SSLIOStream, self)._handle_write()
| {
"content_hash": "bd0a08dae0a168dd88a6e936438a7500",
"timestamp": "",
"source": "github",
"line_count": 322,
"max_line_length": 94,
"avg_line_length": 34.245341614906835,
"alnum_prop": 0.5458420241226082,
"repo_name": "marrow/io",
"id": "1747d89f3d13b69b2d63f942d2b89d7477742f6f",
"size": "11676",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "marrow/io/iostream.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "59049"
}
],
"symlink_target": ""
} |
import os
import random
import numpy as np
from collections import Counter
def main(data_path):
    # randomly hold out 30 files from each category for the test split
labels = [name for name in os.listdir(data_path)]
trainset = {}
testset = {}
dics = []
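    # wipe and recreate the output directory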
os.system('rm -r data/')
os.system('mkdir data')
for i, name in enumerate(labels):
print 'processing for label(%d) - %s' % (i, name)
os.system('mkdir data/'+name)
subpath = os.path.join(data_path, name)
files = os.listdir(subpath)
testfiles = random.sample(files, 30)
trainfiles = [f for f in files if f not in testfiles]
# merge files
train_out = []
for trainf in trainfiles:
lines = [line for line in open(subpath+'/'+trainf, 'r').xreadlines()]
train_out += lines
# train_out = "\n".join(train_out)
f = open('data/'+name+'/train.txt', 'w')
f.writelines(train_out)
f.close()
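        # Note: only the merged training file is written; the held-out test files are not saved by this script.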
if __name__ == "__main__":
# windows path
data_path_windows = "D:/sources/nbsvm-demo/dataset/devset"
data_path_ubuntu_dev = "/home/hkh/sources/dataset/devset"
data_path_ubuntu = "/home/hkh/sources/dataset/20newsgroup"
main(data_path_ubuntu)
| {
"content_hash": "c59c36063c5b6da920fed996f00b4e81",
"timestamp": "",
"source": "github",
"line_count": 44,
"max_line_length": 81,
"avg_line_length": 28.272727272727273,
"alnum_prop": 0.5956591639871383,
"repo_name": "hkhpub/nbsvm-demo",
"id": "ee96771adc670122cdc037461f2ed87c3023a147",
"size": "1244",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "dataset/preprocess.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "68415"
},
{
"name": "C++",
"bytes": "63303"
},
{
"name": "Makefile",
"bytes": "2822"
},
{
"name": "Matlab",
"bytes": "1089"
},
{
"name": "Python",
"bytes": "37437"
},
{
"name": "Shell",
"bytes": "2459"
}
],
"symlink_target": ""
} |