| code | repo_name | path | language | license | size |
|---|---|---|---|---|---|
| stringlengths 2–1.05M | stringlengths 5–104 | stringlengths 4–251 | stringclasses 1 value | stringclasses 15 values | int32 2–1.05M |
# Copyright (C) 2016 Nippon Telegraph and Telephone Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from base import *
class Quagga(Container):
CONTAINER_NAME = None
GUEST_DIR = '/root/config'
def __init__(self, host_dir, conf, image='bgperf/quagga'):
super(Quagga, self).__init__(self.CONTAINER_NAME, image, host_dir, self.GUEST_DIR, conf)
@classmethod
def build_image(cls, force=False, tag='bgperf/quagga', checkout='HEAD', nocache=False):
cls.dockerfile = '''
FROM ubuntu:latest
WORKDIR /root
RUN useradd -M quagga
RUN mkdir /var/log/quagga && chown quagga:quagga /var/log/quagga
RUN mkdir /var/run/quagga && chown quagga:quagga /var/run/quagga
RUN apt-get update && apt-get install -qy git autoconf libtool gawk make telnet libreadline6-dev
RUN git clone git://git.sv.gnu.org/quagga.git quagga
RUN cd quagga && git checkout {0} && ./bootstrap.sh && \
./configure --disable-doc --localstatedir=/var/run/quagga && make && make install
RUN ldconfig
'''.format(checkout)
super(Quagga, cls).build_image(force, tag, nocache)
class QuaggaTarget(Quagga, Target):
CONTAINER_NAME = 'bgperf_quagga_target'
CONFIG_FILE_NAME = 'bgpd.conf'
def write_config(self, scenario_global_conf):
config = """hostname bgpd
password zebra
router bgp {0}
bgp router-id {1}
""".format(self.conf['as'], self.conf['router-id'])
def gen_neighbor_config(n):
local_addr = n['local-address']
c = """neighbor {0} remote-as {1}
neighbor {0} advertisement-interval 1
neighbor {0} route-server-client
neighbor {0} timers 30 90
""".format(local_addr, n['as'])
if 'filter' in n:
for p in (n['filter']['in'] if 'in' in n['filter'] else []):
c += 'neighbor {0} route-map {1} export\n'.format(local_addr, p)
return c
with open('{0}/{1}'.format(self.host_dir, self.CONFIG_FILE_NAME), 'w') as f:
f.write(config)
for n in list(flatten(t.get('neighbors', {}).values() for t in scenario_global_conf['testers'])) + [scenario_global_conf['monitor']]:
f.write(gen_neighbor_config(n))
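# Scenario policies are rendered as Quagga route-maps: every match clause is
# emitted as a prefix-list, as-path access-list, community-list or
# extcommunity-list, and route-map entries referencing them are numbered in
# steps of 10.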
if 'policy' in scenario_global_conf:
seq = 10
for k, v in scenario_global_conf['policy'].iteritems():
match_info = []
for i, match in enumerate(v['match']):
n = '{0}_match_{1}'.format(k, i)
if match['type'] == 'prefix':
f.write(''.join('ip prefix-list {0} deny {1}\n'.format(n, p) for p in match['value']))
f.write('ip prefix-list {0} permit any\n'.format(n))
elif match['type'] == 'as-path':
f.write(''.join('ip as-path access-list {0} deny _{1}_\n'.format(n, p) for p in match['value']))
f.write('ip as-path access-list {0} permit .*\n'.format(n))
elif match['type'] == 'community':
f.write(''.join('ip community-list standard {0} permit {1}\n'.format(n, p) for p in match['value']))
f.write('ip community-list standard {0} permit\n'.format(n))
elif match['type'] == 'ext-community':
f.write(''.join('ip extcommunity-list standard {0} permit {1} {2}\n'.format(n, *p.split(':', 1)) for p in match['value']))
f.write('ip extcommunity-list standard {0} permit\n'.format(n))
match_info.append((match['type'], n))
f.write('route-map {0} permit {1}\n'.format(k, seq))
for info in match_info:
if info[0] == 'prefix':
f.write('match ip address prefix-list {0}\n'.format(info[1]))
elif info[0] == 'as-path':
f.write('match as-path {0}\n'.format(info[1]))
elif info[0] == 'community':
f.write('match community {0}\n'.format(info[1]))
elif info[0] == 'ext-community':
f.write('match extcommunity {0}\n'.format(info[1]))
seq += 10
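# The startup script raises the open-file limit and launches bgpd as root
# against the config generated above.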
def get_startup_cmd(self):
return '\n'.join(
['#!/bin/bash',
'ulimit -n 65536',
'bgpd -u root -f {guest_dir}/{config_file_name}']
).format(
guest_dir=self.guest_dir,
config_file_name=self.CONFIG_FILE_NAME)
| osrg/bgperf | quagga.py | Python | apache-2.0 | 5,099 |
#!/usr/bin/env python
#
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This example gets all networks.
"""
# Import appropriate modules from the client library.
from googleads import ad_manager
def main(client):
# Initialize appropriate service.
network_service = client.GetService('NetworkService', version='v201805')
networks = network_service.getAllNetworks()
# Print out some information for each network.
for network in networks:
print('Network with network code "%s" and display name "%s" was found.'
% (network['networkCode'], network['displayName']))
print('\nNumber of results found: %s' % len(networks))
if __name__ == '__main__':
# Initialize client object.
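# LoadFromStorage() reads OAuth2 credentials and network settings from a
# googleads.yaml file (by default in the user's home directory).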
ad_manager_client = ad_manager.AdManagerClient.LoadFromStorage()
main(ad_manager_client)
| Aloomaio/googleads-python-lib | examples/ad_manager/v201805/network_service/get_all_networks.py | Python | apache-2.0 | 1,350 |
# Copyright 2021 The Waymo Open Dataset Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import copy
import numpy as np
import tensorflow as tf
from google.protobuf import text_format
from waymo_open_dataset.metrics.ops import py_metrics_ops
from waymo_open_dataset.protos import motion_metrics_pb2
class MotionMetricsOpsTest(tf.test.TestCase):
"""Unit tests for motion metrics."""
def _BuildConfig(self, additional_config_str=''):
"""Builds a metrics config."""
config = motion_metrics_pb2.MotionMetricsConfig()
config_text = """
track_steps_per_second: 10
prediction_steps_per_second: 10
track_history_samples: 0
track_future_samples: 4
step_configurations {
measurement_step: 3
lateral_miss_threshold: 1.0
longitudinal_miss_threshold: 2.0
}
max_predictions: 6
speed_scale_lower: 1.0
speed_scale_upper: 1.0
speed_lower_bound: 1.4
speed_upper_bound: 11.0
""" + additional_config_str
text_format.Parse(config_text, config)
return config
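# Builds a single scenario with two fully valid ground-truth tracks over five
# steps: object 1 heads along +x/+y at pi/4, object 2 heads along -x, and one
# joint prediction covers both objects.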
def _CreateTestScenario(self):
gt_scenario_id = ['test']
gt_object_id = [[1, 2]]
gt_object_type = [[1, 1]]
gt_is_valid = np.ones([1, 2, 5], dtype=bool)
gt_trajectory = np.reshape([[[2, 2, 1, 1, 0.78539816, 20.0, 20.0],
[4, 4, 1, 1, 0.78539816, 20.0, 20.0],
[6, 6, 1, 1, 0.78539816, 20.0, 20.0],
[8, 8, 1, 1, 0.78539816, 20.0, 20.0],
[10, 10, 1, 1, 0.78539816, 20.0, 20.0]],
[[-1, 0, 1, 1, 3.14159, -10.0, 0.0],
[-2, 0, 1, 1, 3.14159, -10.0, 0.0],
[-3, 0, 1, 1, 3.14159, -10.0, 0.0],
[-4, 0, 1, 1, 3.14159, -10.0, 0.0],
[-5, 0, 1, 1, 3.14159, -10.0, 0.0]]],
[1, 2, 5, 7])
pred_gt_indices = np.reshape([0, 1], (1, 1, 2))
pred_gt_indices_mask = np.ones((1, 1, 2)) > 0.0
return {
'scenario_id': gt_scenario_id,
'object_id': gt_object_id,
'object_type': gt_object_type,
'gt_is_valid': gt_is_valid,
'gt_trajectory': gt_trajectory,
'pred_gt_indices': pred_gt_indices,
'pred_gt_indices_mask': pred_gt_indices_mask,
}
def setUp(self):
super(MotionMetricsOpsTest, self).setUp()
self._config = self._BuildConfig()
self._gt = self._CreateTestScenario()
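# Helper that runs the motion_metrics op on the given prediction and returns
# [min_ade, min_fde, miss_rate, overlap_rate, mean_ap].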
def _RunEval(self, pred_score, pred_trajectory, gt=None, config=None):
if not gt:
gt = self._gt
if not config:
config = self._config
g = tf.Graph()
with g.as_default():
(min_ade, min_fde, miss_rate, overlap_rate,
mean_ap) = py_metrics_ops.motion_metrics(
config=config.SerializeToString(),
prediction_trajectory=pred_trajectory,
prediction_score=pred_score,
ground_truth_trajectory=gt['gt_trajectory'],
ground_truth_is_valid=gt['gt_is_valid'],
prediction_ground_truth_indices=gt['pred_gt_indices'],
prediction_ground_truth_indices_mask=gt['pred_gt_indices_mask'],
object_type=gt['object_type'],
object_id=gt['object_id'],
scenario_id=gt['scenario_id'])
with self.test_session(graph=g) as sess:
return sess.run([min_ade, min_fde, miss_rate, overlap_rate, mean_ap])
def testComputeMissRateNoMisses(self):
pred_score = np.reshape([0.5], (1, 1, 1))
pred_trajectory = np.reshape([[[4, 4], [6, 6], [8, 8], [10, 10]],
[[-2, 0], [-3, 0], [-4, 0], [-5, 0]]],
(1, 1, 1, 2, 4, 2))
val = self._RunEval(pred_score, pred_trajectory)
# miss_rate of Vehicle.
self.assertEqual(val[2][0], 0.0)
# mean_ap of Vehicle.
self.assertEqual(val[4][0], 1.0)
def testComputeMissRateNoMisses2(self):
pred_score = np.reshape([0.5], (1, 1, 1))
pred_trajectory = np.reshape([[[-2, 0], [-3, 0], [-4, 0], [-5, 0]],
[[4, 4], [6, 6], [8, 8], [10, 10]]],
(1, 1, 1, 2, 4, 2))
gt = copy.deepcopy(self._gt)
gt['pred_gt_indices'] = np.reshape([1, 0], (1, 1, 2))
val = self._RunEval(pred_score, pred_trajectory, gt=gt)
# miss_rate of Vehicle.
self.assertEqual(val[2][0], 0.0)
# mean_ap of Vehicle.
self.assertEqual(val[4][0], 1.0)
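# Object 2's prediction is offset laterally by 1.01, just beyond the 1.0
# lateral_miss_threshold, so the measurement step counts as a miss.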
def testComputeMissRateLateral_2(self):
pred_score = np.reshape([0.5], (1, 1, 1))
pred_trajectory = np.reshape(
[[[4, 4], [6, 6], [8, 8], [10, 10]],
[[-2, 1.01], [-3, 1.01], [-4, 1.01], [-5, 1.01]]], (1, 1, 1, 2, 4, 2))
val = self._RunEval(pred_score, pred_trajectory)
# miss_rate of Vehicle.
self.assertEqual(val[2][0], 1.0)
# mean_ap of Vehicle.
self.assertEqual(val[4][0], 0.0)
def testComputeMissRateLateral_1(self):
pred_score = np.reshape([0.5], (1, 1, 1))
pred_trajectory = np.reshape([[[4, 4], [6, 6], [8, 8], [9.292, 10.708]],
[[-2, 0], [-3, 0], [-4, 0], [-5, 0]]],
(1, 1, 1, 2, 4, 2))
val = self._RunEval(pred_score, pred_trajectory)
# miss_rate of Vehicle.
self.assertEqual(val[2][0], 1.0)
# mean_ap of Vehicle.
self.assertEqual(val[4][0], 0.0)
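# Object 2's prediction overshoots by 2.01 along its heading, just beyond the
# 2.0 longitudinal_miss_threshold, so the measurement step counts as a miss.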
def testComputeMissRateLongitudinal_2(self):
pred_score = np.reshape([0.5], (1, 1, 1))
pred_trajectory = np.reshape([[[4, 4], [6, 6], [8, 8], [10, 10]],
[[-2, 0], [-3, 0], [-4, 0], [-7.01, 0]]],
(1, 1, 1, 2, 4, 2))
val = self._RunEval(pred_score, pred_trajectory)
# miss_rate of Vehicle.
self.assertEqual(val[2][0], 1.0)
# mean_ap of Vehicle.
self.assertEqual(val[4][0], 0.0)
def testComputeMissRateLongitudinal_1(self):
pred_score = np.reshape([0.5], (1, 1, 1))
pred_trajectory = np.reshape([[[4, 4], [6, 6], [8, 8], [11.415, 11.415]],
[[-2, 0], [-3, 0], [-4, 0], [-5, 0]]],
(1, 1, 1, 2, 4, 2))
val = self._RunEval(pred_score, pred_trajectory)
# miss_rate of Vehicle.
self.assertEqual(val[2][0], 1.0)
# mean_ap of Vehicle.
self.assertEqual(val[4][0], 0.0)
def testComputeNoMissLongitudinal_1(self):
pred_score = np.reshape([0.5], (1, 1, 1))
pred_trajectory = np.reshape([[[4, 4], [6, 6], [8, 8], [11.414, 11.414]],
[[-2, 0], [-3, 0], [-4, 0], [-5, 0]]],
(1, 1, 1, 2, 4, 2))
val = self._RunEval(pred_score, pred_trajectory)
# miss_rate of Vehicle.
self.assertEqual(val[2][0], 0.0)
# mean_ap of Vehicle.
self.assertEqual(val[4][0], 1.0)
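# With speed scaling enabled, the miss thresholds are scaled by a factor
# interpolated between speed_scale_lower and speed_scale_upper according to
# the ground-truth speed clamped to [speed_lower_bound, speed_upper_bound];
# the cases below bracket the speed at which a 0.75 lateral offset just fits.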
def testComputeVelocityScalingLatitudinal(self):
pred_score = np.reshape([0.5], (1, 1, 1))
pred_trajectory = np.reshape([[[4, 4], [6, 6], [8, 8], [10, 10]],
[[-2, 0], [-3, 0], [-4, 0], [-5, 0.75]]],
(1, 1, 1, 2, 4, 2))
config = motion_metrics_pb2.MotionMetricsConfig()
config.CopyFrom(self._config)
config.speed_scale_lower = 0.5
config.speed_scale_upper = 1.0
config.speed_lower_bound = 1.0
config.speed_upper_bound = 3.0
val = self._RunEval(pred_score, pred_trajectory, config=config)
# miss_rate of Vehicle.
self.assertEqual(val[2][0], 0.0)
# mean_ap of Vehicle.
self.assertEqual(val[4][0], 1.0)
# Decrease the velocity below the speed lower bound.
gt = copy.deepcopy(self._gt)
gt['gt_trajectory'][0, 1, :, 5:7] = 0.0
val = self._RunEval(pred_score, pred_trajectory, config=config, gt=gt)
# miss_rate of Vehicle.
self.assertEqual(val[2][0], 1.0)
# Set the velocity to just below the speed required for object2 to fit.
gt = copy.deepcopy(self._gt)
gt['gt_trajectory'][0, 1, :, 5] = 1.999
val = self._RunEval(pred_score, pred_trajectory, config=config, gt=gt)
# miss_rate of Vehicle.
self.assertEqual(val[2][0], 1.0)
# Set the velocity to just above the speed required for object2 to fit.
gt = copy.deepcopy(self._gt)
gt['gt_trajectory'][0, 1, :, 5] = 2.001
val = self._RunEval(pred_score, pred_trajectory, config=config, gt=gt)
# miss_rate of Vehicle.
self.assertEqual(val[2][0], 0.0)
def testComputeVelocityScalingLongitudinal(self):
pred_score = np.reshape([0.5], (1, 1, 1))
pred_trajectory = np.reshape([[[4, 4], [6, 6], [8, 8], [10, 10]],
[[-2, 0], [-3, 0], [-4, 0], [-6.5, 0]]],
(1, 1, 1, 2, 4, 2))
config = motion_metrics_pb2.MotionMetricsConfig()
config.CopyFrom(self._config)
config.speed_scale_lower = 0.5
config.speed_scale_upper = 1.0
config.speed_lower_bound = 1.0
config.speed_upper_bound = 3.0
val = self._RunEval(pred_score, pred_trajectory, config=config)
# miss_rate of Vehicle.
self.assertEqual(val[2][0], 0.0)
# mean_ap of Vehicle.
self.assertEqual(val[4][0], 1.0)
# Decrease the velocity below the speed lower bound.
gt = copy.deepcopy(self._gt)
gt['gt_trajectory'][0, 1, :, 5:7] = 0.0
val = self._RunEval(pred_score, pred_trajectory, config=config, gt=gt)
# miss_rate of Vehicle.
self.assertEqual(val[2][0], 1.0)
# Set the velocity to just below the speed required for object2 to fit.
gt = copy.deepcopy(self._gt)
gt['gt_trajectory'][0, 1, :, 5] = 1.999
val = self._RunEval(pred_score, pred_trajectory, config=config, gt=gt)
# miss_rate of Vehicle.
self.assertEqual(val[2][0], 1.0)
# Set the velocity to just above the speed required for object2 to fit.
gt = copy.deepcopy(self._gt)
gt['gt_trajectory'][0, 1, :, 5] = 2.001
val = self._RunEval(pred_score, pred_trajectory, config=config, gt=gt)
# miss_rate of Vehicle.
self.assertEqual(val[2][0], 0.0)
def testComputeNoMissLateral_2(self):
pred_score = np.reshape([0.8, 0.5], (1, 1, 2))
pred_trajectory = np.reshape([[[[4, 4], [6, 6], [8, 8], [9.294, 10.706]],
[[-2, 0], [-3, 0], [-4, 0], [-5, 0]]],
[[[4, 4], [6, 6], [8, 8], [10, 10]],
[[-2, 0], [-3, 0], [-4, 0], [-5, 0]]]],
(1, 1, 2, 2, 4, 2))
val = self._RunEval(pred_score, pred_trajectory)
# miss_rate of Vehicle.
self.assertEqual(val[2][0], 0.0)
# mean_ap of Vehicle.
self.assertEqual(val[4][0], 1.0)
def testTwoJointPredictionsNoMiss(self):
pred_score = np.reshape([0.8, 0.5], (1, 1, 2))
pred_trajectory = np.reshape([[[[4, 4], [6, 6], [8, 8], [10, 10]],
[[-2, 0], [-3, 0], [-4, 0], [-7.01, 0]]],
[[[4, 4], [6, 6], [8, 8], [10, 10]],
[[-2, 0], [-3, 0], [-4, 0], [-5, 0]]]],
(1, 1, 2, 2, 4, 2))
val = self._RunEval(pred_score, pred_trajectory)
# miss_rate of Vehicle.
self.assertEqual(val[2][0], 0.0)
# mean_ap of Vehicle.
self.assertEqual(val[4][0], 0.5)
def testTwoJointPredictionsMiss(self):
pred_score = np.reshape([0.8, 0.5], (1, 1, 2))
pred_trajectory = np.reshape([[[[4, 4], [6, 6], [8, 8], [10, 10]],
[[-2, 0], [-3, 0], [-4, 0], [-7.01, 0]]],
[[[4, 4], [6, 6], [8, 8], [14, 14]],
[[-2, 0], [-3, 0], [-4, 0], [-5, 0]]]],
(1, 1, 2, 2, 4, 2))
val = self._RunEval(pred_score, pred_trajectory)
# miss_rate of Vehicle.
self.assertEqual(val[2][0], 1.0)
# mean_ap of Vehicle.
self.assertEqual(val[4][0], 0.0)
def testComputeMinADE(self):
pred_score = np.reshape([0.5, 0.5], (1, 1, 2))
pred_trajectory = np.reshape(
[[[[4, 0], [6, 0], [8, 0], [10, 0]], [[0, 2], [0, 3], [0, 4], [0, 5]]],
[[[14, 0], [16, 0], [18, 0], [20, 0]],
[[0, 22], [0, 23], [0, 24], [0, 25]]]], (1, 1, 2, 2, 4, 2))
val = self._RunEval(pred_score, pred_trajectory)
# 5 metrics.
self.assertEqual(len(val), 5)
# One entry per object type for the single measurement step.
self.assertEqual(len(val[0]), 3)
# ADE of Vehicle.
self.assertAlmostEqual(val[0][0], 5.97487, delta=1e-4)
# FDE of Vehicle.
self.assertAlmostEqual(val[1][0], 8.53553, delta=1e-4)
if __name__ == '__main__':
tf.compat.v1.disable_eager_execution()
tf.test.main()
| waymo-research/waymo-open-dataset | waymo_open_dataset/metrics/ops/motion_metrics_ops_test.py | Python | apache-2.0 | 13,287 |
from __future__ import unicode_literals
import os
from mopidy import config, exceptions, ext
__version__ = '0.2.2'
class GMusicExtension(ext.Extension):
dist_name = 'Mopidy-GMusic'
ext_name = 'gmusic'
version = __version__
def get_default_config(self):
conf_file = os.path.join(os.path.dirname(__file__), 'ext.conf')
return config.read(conf_file)
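# Extend the base extension schema with the gmusic credentials and the
# optional device id.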
def get_config_schema(self):
schema = super(GMusicExtension, self).get_config_schema()
schema['username'] = config.String()
schema['password'] = config.Secret()
schema['deviceid'] = config.String(optional=True)
return schema
def validate_environment(self):
try:
import gmusicapi # noqa
except ImportError as e:
raise exceptions.ExtensionError('gmusicapi library not found', e)
def get_backend_classes(self):
from .actor import GMusicBackend
return [GMusicBackend]
| jeh/mopidy-gmusic | mopidy_gmusic/__init__.py | Python | apache-2.0 | 976 |
import types
import unittest
from collections import namedtuple
import mock
from plugins.systems.config_container_crawler import ConfigContainerCrawler
from plugins.systems.config_host_crawler import ConfigHostCrawler
from plugins.systems.connection_container_crawler import ConnectionContainerCrawler
from plugins.systems.connection_host_crawler import ConnectionHostCrawler
from plugins.systems.connection_vm_crawler import ConnectionVmCrawler
from plugins.systems.cpu_container_crawler import CpuContainerCrawler
from plugins.systems.cpu_host_crawler import CpuHostCrawler
from plugins.systems.disk_container_crawler import DiskContainerCrawler
from plugins.systems.disk_host_crawler import DiskHostCrawler
from plugins.systems.dockerhistory_container_crawler import DockerhistoryContainerCrawler
from plugins.systems.dockerinspect_container_crawler import DockerinspectContainerCrawler
from plugins.systems.dockerps_host_crawler import DockerpsHostCrawler
from plugins.systems.file_container_crawler import FileContainerCrawler
from plugins.systems.file_host_crawler import FileHostCrawler
from plugins.systems.interface_container_crawler import InterfaceContainerCrawler
from plugins.systems.interface_host_crawler import InterfaceHostCrawler
from plugins.systems.interface_vm_crawler import InterfaceVmCrawler
from plugins.systems.load_container_crawler import LoadContainerCrawler
from plugins.systems.load_host_crawler import LoadHostCrawler
from plugins.systems.memory_container_crawler import MemoryContainerCrawler
from plugins.systems.memory_host_crawler import MemoryHostCrawler
from plugins.systems.memory_vm_crawler import MemoryVmCrawler
from plugins.systems.metric_container_crawler import MetricContainerCrawler
from plugins.systems.metric_host_crawler import MetricHostCrawler
from plugins.systems.metric_vm_crawler import MetricVmCrawler
from plugins.systems.os_container_crawler import OSContainerCrawler
from plugins.systems.os_host_crawler import OSHostCrawler
from plugins.systems.os_vm_crawler import os_vm_crawler
from plugins.systems.package_container_crawler import PackageContainerCrawler
from plugins.systems.package_host_crawler import PackageHostCrawler
from plugins.systems.process_container_crawler import ProcessContainerCrawler
from plugins.systems.process_host_crawler import ProcessHostCrawler
from plugins.systems.process_vm_crawler import process_vm_crawler
from container import Container
from utils.crawler_exceptions import CrawlError
from utils.features import (
OSFeature,
ConfigFeature,
DiskFeature,
PackageFeature,
MemoryFeature,
CpuFeature,
InterfaceFeature,
LoadFeature,
DockerPSFeature)
# for OUTVM psvmi
class DummyContainer(Container):
def __init__(self, long_id):
self.pid = '1234'
self.long_id = long_id
def get_memory_cgroup_path(self, node):
return '/cgroup/%s' % node
def get_cpu_cgroup_path(self, node):
return '/cgroup/%s' % node
# for OUTVM psvmi
psvmi_sysinfo = namedtuple('psvmi_sysinfo',
'''boottime ipaddr osdistro osname osplatform osrelease
ostype osversion memory_used memory_buffered
memory_cached memory_free''')
psvmi_memory = namedtuple(
'psvmi_memory',
'memory_used memory_buffered memory_cached memory_free')
psvmi_interface = namedtuple(
'psvmi_interface',
'ifname bytes_sent bytes_recv packets_sent packets_recv errout errin')
os_stat = namedtuple(
'os_stat',
'''st_mode st_gid st_uid st_atime st_ctime st_mtime st_size''')
def mocked_os_walk(root_dir):
files = ['file1', 'file2', 'file3']
dirs = ['dir']
yield ('/', dirs, files)
# simulate the os_walk behavior (if a dir is deleted, we don't walk it)
if '/dir' in dirs:
files = ['file4']
dirs = []
yield ('/dir', dirs, files)
def mocked_os_walk_for_avoidsetns(root_dir):
files = ['file1', 'file2', 'file3']
dirs = ['dir']
yield ('/1/2/3', dirs, files)
# simulate the os_walk behavior (if a dir is deleted, we don't walk it)
if '/1/2/3/dir' in dirs:
files = ['file4']
dirs = []
yield ('/dir', dirs, files)
# XXX can't do self.count = for some reason
mcount = 0
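# Fake cgroup file objects for the memory/cpu container crawler tests:
# iteration yields memory.stat-style 'total_*' lines or cpuacct-style
# 'user'/'system' lines, while readline() stands in for single-value files.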
class MockedMemCgroupFile(mock.Mock):
def __init__(self):
pass
def readline(self):
return '2'
def __iter__(self):
return self
def next(self):
global mcount
mcount += 1
if mcount == 1:
return 'total_cache 100'
if mcount == 2:
return 'total_active_file 200'
else:
raise StopIteration()
# XXX can't do self.count = for some reason
ccount = 0
ccount2 = 0
class MockedCpuCgroupFile(mock.Mock):
def __init__(self):
pass
def readline(self):
global ccount2
ccount2 += 1
if ccount2 == 1:
return '1e7'
else:
return '2e7'
def __iter__(self):
return self
def next(self):
global ccount
ccount += 1
if ccount == 1:
return 'system 20'
if ccount == 2:
return 'user 20'
else:
raise StopIteration()
class MockedFile(mock.Mock):
def __init__(self):
pass
def read(self):
return 'content'
def mocked_codecs_open(filename, mode, encoding, errors):
m = mock.Mock()
m.__enter__ = mock.Mock(return_value=MockedFile())
m.__exit__ = mock.Mock(return_value=False)
return m
def mocked_cpu_cgroup_open(filename, mode):
m = mock.Mock()
m.__enter__ = mock.Mock(return_value=MockedCpuCgroupFile())
m.__exit__ = mock.Mock(return_value=False)
print filename
return m
def mocked_memory_cgroup_open(filename, mode):
m = mock.Mock()
m.__enter__ = mock.Mock(return_value=MockedMemCgroupFile())
m.__exit__ = mock.Mock(return_value=False)
print filename
return m
partition = namedtuple('partition', 'device fstype mountpoint opts')
pdiskusage = namedtuple('pdiskusage', 'percent total')
meminfo = namedtuple('meminfo', 'rss vms')
ioinfo = namedtuple('ioinfo', 'read_bytes write_bytes')
psutils_memory = namedtuple('psutils_memory', 'used free buffers cached')
psutils_cpu = namedtuple(
'psutils_cpu',
'idle nice user iowait system irq steal')
psutils_net = namedtuple(
'psutils_net',
'bytes_sent bytes_recv packets_sent packets_recv errout errin')
def mocked_disk_partitions(all):
return [partition('/dev/a', 'type', '/a', 'opts'),
partition('/dev/b', 'type', '/b', 'opts')]
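# Minimal stand-ins for the psutil connection and process objects returned by
# the mocked psutil.process_iter calls below.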
class Connection():
def __init__(self):
self.laddr = ['1.1.1.1', '22']
self.raddr = ['2.2.2.2', '22']
self.status = 'Established'
class Process():
def __init__(self, name):
self.name = name
self.cmdline = ['cmd']
self.pid = 123
self.status = 'Running'
self.cwd = '/bin'
self.ppid = 1
self.create_time = 1000
def num_threads(self):
return 1
def username(self):
return 'don quijote'
def get_open_files(self):
return []
def get_connections(self):
return [Connection()]
def get_memory_info(self):
return meminfo(10, 20)
def get_io_counters(self):
return ioinfo(10, 20)
def get_cpu_percent(self, interval):
return 30
def get_memory_percent(self):
return 30
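# 16749 == 0o40555 (S_IFDIR | 0o555), so paths mocked with this mode are
# reported as directories by the crawlers.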
STAT_DIR_MODE = 16749
def mocked_os_lstat(path):
print path
if path == '/':
return os_stat(STAT_DIR_MODE, 2, 3, 4, 5, 6, 7)
elif path == '/file1':
return os_stat(1, 2, 3, 4, 5, 6, 7)
elif path == '/file2':
return os_stat(1, 2, 3, 4, 5, 6, 7)
elif path == '/file3':
return os_stat(1, 2, 3, 4, 5, 6, 7)
elif path == '/dir':
return os_stat(STAT_DIR_MODE, 2, 3, 4, 5, 6, 7)
else:
return os_stat(1, 2, 3, 4, 5, 6, 7)
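# Test double for run_as_another_namespace: it skips the namespace switch,
# calls the function directly and materializes generator results into a list.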
def mocked_run_as_another_namespace(pid, ns, function, *args, **kwargs):
result = function(*args)
# if res is a generator (i.e. function uses yield)
if isinstance(result, types.GeneratorType):
result = list(result)
return result
def throw_os_error(*args, **kvargs):
raise OSError()
class PluginTests(unittest.TestCase):
def setUp(self):
pass
def tearDown(self):
pass
def test_init(self, *args):
pass
@mock.patch('utils.os_utils.time.time',
side_effect=lambda: 1001)
@mock.patch('utils.os_utils.platform.platform',
side_effect=lambda: 'platform')
@mock.patch('utils.os_utils.utils.misc.get_host_ip4_addresses',
side_effect=lambda: ['1.1.1.1'])
@mock.patch('utils.os_utils.psutil.boot_time',
side_effect=lambda: 1000)
@mock.patch('utils.os_utils.platform.system',
side_effect=lambda: 'linux')
@mock.patch('utils.os_utils.platform.machine',
side_effect=lambda: 'machine')
@mock.patch(
'utils.os_utils.osinfo.get_osinfo',
side_effect=lambda mount_point=None: {
'os': 'os',
'version': 'os_version'})
def test_os_host_cawler_plugin(self, *args):
fc = OSHostCrawler()
for os in fc.crawl():
print os
assert os == (
'linux',
OSFeature(
boottime=1000,
uptime=1,
ipaddr=['1.1.1.1'],
os='os',
os_version='os_version',
os_kernel='platform',
architecture='machine'),
'os')
for i, arg in enumerate(args):
if i > 0: # time.time is called more than once
continue
assert arg.call_count == 1
@mock.patch('utils.os_utils.platform.system',
side_effect=throw_os_error)
def test_os_host_crawler_plugin_failure(self, *args):
fc = OSHostCrawler()
with self.assertRaises(OSError):
for os in fc.crawl():
pass
@mock.patch(
'utils.os_utils.osinfo.get_osinfo',
side_effect=lambda mount_point=None: {
'os': 'os',
'version': 'os_version'})
def test_os_host_crawler_plugin_mountpoint_mode(self, *args):
fc = OSHostCrawler()
for os in fc.crawl(root_dir='/a'):
print os
assert os == (
'linux',
OSFeature(
boottime='unsupported',
uptime='unsupported',
ipaddr='0.0.0.0',
os='os',
os_version='os_version',
os_kernel='unknown',
architecture='unknown'),
'os')
for i, arg in enumerate(args):
assert arg.call_count == 1
@mock.patch('utils.os_utils.osinfo.get_osinfo',
side_effect=throw_os_error)
def test_os_host_crawler_plugin_mountpoint_mode_failure(self, *args):
fc = OSHostCrawler()
with self.assertRaises(OSError):
for os in fc.crawl(root_dir='/a'):
pass
@mock.patch('utils.os_utils.time.time',
side_effect=lambda: 1001)
@mock.patch('utils.os_utils.platform.platform',
side_effect=lambda: 'platform')
@mock.patch('utils.os_utils.utils.misc.get_host_ip4_addresses',
side_effect=lambda: ['1.1.1.1'])
@mock.patch('utils.os_utils.psutil.boot_time',
side_effect=lambda: 1000)
@mock.patch('utils.os_utils.platform.system',
side_effect=lambda: 'linux')
@mock.patch('utils.os_utils.platform.machine',
side_effect=lambda: 'machine')
@mock.patch(
("plugins.systems.os_container_crawler."
"run_as_another_namespace"),
side_effect=mocked_run_as_another_namespace)
@mock.patch(
("plugins.systems.os_container_crawler."
"utils.dockerutils.exec_dockerinspect"),
side_effect=lambda long_id: {'State': {'Pid': 123}})
@mock.patch(
'utils.os_utils.osinfo.get_osinfo',
side_effect=lambda mount_point=None: {
'os': 'os',
'version': 'os_version'})
def test_os_container_crawler_plugin(self, *args):
fc = OSContainerCrawler()
for os in fc.crawl(container_id=123):
print os
assert os == (
'linux',
OSFeature(
boottime=1000,
uptime=1,
ipaddr=['1.1.1.1'],
os='os',
os_version='os_version',
os_kernel='platform',
architecture='machine'),
'os')
for i, arg in enumerate(args):
if i > 0: # time.time is called more than once
continue
assert arg.call_count == 1
@mock.patch(
("plugins.systems.os_container_crawler."
"utils.dockerutils.exec_dockerinspect"),
side_effect=lambda long_id: {'State': {'Pid': 123}})
@mock.patch(
("plugins.systems.os_container_crawler.utils.dockerutils."
"get_docker_container_rootfs_path"),
side_effect=lambda long_id: '/a/b/c')
@mock.patch(
'utils.os_utils.osinfo.get_osinfo',
side_effect=lambda mount_point=None: {
'os': 'os',
'version': 'os_version'})
def test_os_container_crawler_plugin_avoidsetns(self, *args):
fc = OSContainerCrawler()
for os in fc.crawl(container_id=123, avoid_setns=True):
print os
assert os == (
'linux',
OSFeature(
boottime='unsupported',
uptime='unsupported',
ipaddr='0.0.0.0',
os='os',
os_version='os_version',
os_kernel='unknown',
architecture='unknown'),
'os')
for i, arg in enumerate(args):
print i, arg
if i == 0:
# get_osinfo()
assert arg.call_count == 1
arg.assert_called_with(mount_point='/a/b/c')
elif i == 1:
# get_docker_container_rootfs_path
assert arg.call_count == 1
arg.assert_called_with(123)
else:
# exec_dockerinspect
assert arg.call_count == 1
arg.assert_called_with(123)
@mock.patch(
("plugins.systems.os_container_crawler."
"utils.dockerutils.exec_dockerinspect"),
side_effect=lambda long_id: {'State': {'Pid': 123}})
@mock.patch(
("plugins.systems.os_container_crawler.utils.dockerutils."
"get_docker_container_rootfs_path"),
side_effect=throw_os_error)
def test_os_container_crawler_plugin_avoidsetns_failure(self, *args):
fc = OSContainerCrawler()
with self.assertRaises(OSError):
for os in fc.crawl(container_id=123, avoid_setns=True):
pass
@mock.patch('plugins.systems.os_vm_crawler.psvmi.context_init',
side_effect=lambda dn1, dn2, kv, d, a: 1000)
@mock.patch('plugins.systems.os_vm_crawler.psvmi.system_info',
side_effect=lambda vmc: psvmi_sysinfo(1000,
'1.1.1.1',
'osdistro',
'osname',
'osplatform',
'osrelease',
'ostype',
'osversion',
1000000,
100000,
100000,
100000))
@mock.patch('plugins.systems.os_vm_crawler.psvmi')
def test_os_vm_crawler_plugin_without_vm(self, *args):
fc = os_vm_crawler()
for os in fc.crawl(vm_desc=('dn', '2.6', 'ubuntu', 'x86')):
assert os == (
'ostype',
OSFeature(
boottime=1000,
uptime='unknown',
ipaddr='1.1.1.1',
os='ostype',
os_version='osversion',
os_kernel='osrelease',
architecture='osplatform'),
'os')
pass
assert args[1].call_count == 1
@mock.patch('utils.file_utils.os.path.isdir',
side_effect=lambda p: True)
@mock.patch('utils.file_utils.os.walk',
side_effect=mocked_os_walk)
@mock.patch('utils.file_utils.os.lstat',
side_effect=mocked_os_lstat)
def test_file_host_crawler(self, *args):
fc = FileHostCrawler()
for (k, f, fname) in fc.crawl():
print f
assert fname == "file"
assert f.mode in [1, STAT_DIR_MODE] and f.gid == 2 and f.uid == 3
assert f.atime == 4 and f.ctime == 5
assert f.mtime == 6 and f.size == 7
assert f.name in ['', 'dir', 'file1', 'file2', 'file3', 'file4']
assert f.path in ['/', '/file1', '/file2', '/file3',
'/dir', '/dir/file4']
assert f.type in ['file', 'dir']
assert f.linksto is None
assert args[0].call_count == 6
assert args[1].call_count == 1 # oswalk
args[1].assert_called_with('/')
assert args[2].call_count == 2 # isdir
args[2].assert_called_with('/')
@mock.patch('utils.file_utils.os.path.isdir',
side_effect=lambda p: True)
@mock.patch('utils.file_utils.os.walk',
side_effect=mocked_os_walk)
@mock.patch('utils.file_utils.os.lstat',
side_effect=mocked_os_lstat)
def test_file_host_crawler_with_exclude_dirs(self, *args):
fc = FileHostCrawler()
for (k, f, fname) in fc.crawl(exclude_dirs=['dir']):
print f
assert fname == "file"
assert f.mode in [1, STAT_DIR_MODE] and f.gid == 2 and f.uid == 3
assert f.atime == 4 and f.ctime == 5
assert f.mtime == 6 and f.size == 7
assert f.name in ['', 'file1', 'file2', 'file3', 'file4']
assert f.path in ['/', '/file1', '/file2', '/file3']
assert f.path not in ['/dir', '/dir/file4']
assert f.type in ['file', 'dir']
assert f.linksto is None
assert args[0].call_count == 4
assert args[1].call_count == 1 # oswalk
args[1].assert_called_with('/')
assert args[2].call_count == 2 # isdir
args[2].assert_called_with('/')
@mock.patch('utils.file_utils.os.path.isdir',
side_effect=lambda p: True)
@mock.patch('utils.file_utils.os.walk',
side_effect=throw_os_error)
@mock.patch('utils.file_utils.os.lstat',
side_effect=mocked_os_lstat)
def test_file_host_crawler_failure(self, *args):
fc = FileHostCrawler()
with self.assertRaises(OSError):
for (k, f, fname) in fc.crawl(root_dir='/a/b/c'):
pass
@mock.patch(
("plugins.systems.file_container_crawler."
"utils.dockerutils.exec_dockerinspect"),
side_effect=lambda long_id: {'State': {'Pid': 123}})
@mock.patch(
("plugins.systems.file_container_crawler."
"run_as_another_namespace"),
side_effect=mocked_run_as_another_namespace)
@mock.patch('utils.file_utils.os.path.isdir',
side_effect=lambda p: True)
@mock.patch('utils.file_utils.os.walk',
side_effect=mocked_os_walk)
@mock.patch('utils.file_utils.os.lstat',
side_effect=mocked_os_lstat)
def test_file_container_crawler(self, *args):
fc = FileContainerCrawler()
for (k, f, fname) in fc.crawl(root_dir='/'):
assert fname == "file"
assert f.mode in [1, STAT_DIR_MODE] and f.gid == 2 and f.uid == 3
assert f.atime == 4 and f.ctime == 5
assert f.mtime == 6 and f.size == 7
assert f.name in ['', 'dir', 'file1', 'file2', 'file3', 'file4']
assert f.path in ['/', '/file1', '/file2', '/file3',
'/dir', '/dir/file4']
assert f.type in ['file', 'dir']
assert f.linksto is None
assert args[0].call_count == 6
assert args[1].call_count == 1 # oswalk
args[1].assert_called_with('/')
assert args[2].call_count == 2 # isdir
args[2].assert_called_with('/')
@mock.patch(
("plugins.systems.file_container_crawler."
"utils.dockerutils.exec_dockerinspect"),
side_effect=lambda long_id: {'State': {'Pid': 123}})
@mock.patch('utils.file_utils.os.walk',
side_effect=throw_os_error)
@mock.patch(
("plugins.systems.file_container_crawler."
"run_as_another_namespace"),
side_effect=mocked_run_as_another_namespace)
@mock.patch('utils.file_utils.os.path.isdir',
side_effect=lambda p: True)
@mock.patch('utils.file_utils.os.lstat',
side_effect=mocked_os_lstat)
def test_file_container_crawler_failure(self, *args):
fc = FileContainerCrawler()
with self.assertRaises(OSError):
for (k, f, fname) in fc.crawl(root_dir='/a/b/c'):
pass
@mock.patch(
("plugins.systems.file_container_crawler."
"utils.dockerutils.exec_dockerinspect"),
side_effect=lambda long_id: {'State': {'Pid': 123}})
@mock.patch(
("plugins.systems.file_container_crawler.utils.dockerutils."
"get_docker_container_rootfs_path"),
side_effect=lambda long_id: '/1/2/3')
@mock.patch('utils.file_utils.os.path.isdir',
side_effect=lambda p: True)
@mock.patch('utils.file_utils.os.walk',
side_effect=mocked_os_walk_for_avoidsetns)
@mock.patch('utils.file_utils.os.lstat',
side_effect=mocked_os_lstat)
def test_file_container_crawler_avoidsetns(self, *args):
fc = FileContainerCrawler()
for (k, f, fname) in fc.crawl(root_dir='/', avoid_setns=True):
print f
assert fname == "file"
assert f.mode in [1, STAT_DIR_MODE] and f.gid == 2 and f.uid == 3
assert f.atime == 4 and f.ctime == 5
assert f.mtime == 6 and f.size == 7
assert f.name in ['', 'dir', 'file1', 'file2', 'file3', 'file4']
assert f.path in ['/', '/file1', '/file2', '/file3',
'/dir', '/dir/file4']
assert f.type in ['file', 'dir']
assert f.linksto is None
assert args[0].call_count == 6
assert args[1].call_count == 1 # oswalk
args[1].assert_called_with('/1/2/3')
assert args[2].call_count == 2 # isdir
args[2].assert_called_with('/1/2/3')
@mock.patch(
("plugins.systems.file_container_crawler."
"utils.dockerutils.exec_dockerinspect"),
side_effect=lambda long_id: {'State': {'Pid': 123}})
@mock.patch(
("plugins.systems.file_container_crawler."
"run_as_another_namespace"),
side_effect=mocked_run_as_another_namespace)
@mock.patch('utils.file_utils.os.path.isdir',
side_effect=lambda p: True)
@mock.patch('utils.file_utils.os.walk',
side_effect=mocked_os_walk)
@mock.patch('utils.file_utils.os.lstat',
side_effect=mocked_os_lstat)
def test_file_container_crawler_with_exclude_dirs(self, *args):
fc = FileContainerCrawler()
for (k, f, fname) in fc.crawl(root_dir='/',
exclude_dirs=['dir']):
assert fname == "file"
assert f.mode in [1, STAT_DIR_MODE] and f.gid == 2 and f.uid == 3
assert f.atime == 4 and f.ctime == 5
assert f.mtime == 6 and f.size == 7
assert f.name in ['', 'file1', 'file2', 'file3', 'file4']
assert f.path in ['/', '/file1', '/file2', '/file3']
assert f.path not in ['/dir', '/dir/file4']
assert f.type in ['file', 'dir']
assert f.linksto is None
assert args[0].call_count == 4
assert args[1].call_count == 1 # oswalk
args[1].assert_called_with('/')
assert args[2].call_count == 2 # isdir
args[2].assert_called_with('/')
@mock.patch(
("plugins.systems.file_container_crawler."
"utils.dockerutils.exec_dockerinspect"),
side_effect=lambda long_id: {'State': {'Pid': 123}})
@mock.patch(
("plugins.systems.file_container_crawler.utils.dockerutils."
"get_docker_container_rootfs_path"),
side_effect=lambda long_id: '/1/2/3')
@mock.patch('utils.file_utils.os.path.isdir',
side_effect=lambda p: True)
@mock.patch('utils.file_utils.os.walk',
side_effect=mocked_os_walk_for_avoidsetns)
@mock.patch('utils.file_utils.os.lstat',
side_effect=mocked_os_lstat)
def test_file_container_crawler_avoidsetns_with_exclude_dirs(
self,
*
args):
fc = FileContainerCrawler()
for (k, f, fname) in fc.crawl(root_dir='/',
avoid_setns=True,
exclude_dirs=['/dir']):
assert fname == "file"
assert f.mode in [1, STAT_DIR_MODE] and f.gid == 2 and f.uid == 3
assert f.atime == 4 and f.ctime == 5
assert f.mtime == 6 and f.size == 7
assert f.name in ['', 'file1', 'file2', 'file3', 'file4']
assert f.path in ['/', '/file1', '/file2', '/file3']
assert f.path not in ['/dir', '/dir/file4']
assert f.type in ['file', 'dir']
assert f.linksto is None
assert args[0].call_count == 4
assert args[1].call_count == 1 # oswalk
args[1].assert_called_with('/1/2/3')
assert args[2].call_count == 2 # isdir
args[2].assert_called_with('/1/2/3')
@mock.patch('utils.config_utils.os.path.isdir',
side_effect=lambda p: True)
@mock.patch('utils.config_utils.os.path.exists',
side_effect=lambda p: True)
@mock.patch('utils.config_utils.os.lstat',
side_effect=mocked_os_lstat)
@mock.patch('utils.config_utils.codecs.open',
side_effect=mocked_codecs_open)
def test_config_host_crawler(self, *args):
fc = ConfigHostCrawler()
for (k, f, fname) in fc.crawl(known_config_files=['/etc/file1'],
discover_config_files=False):
assert fname == "config"
assert f == ConfigFeature(name='file1', content='content',
path='/etc/file1')
assert args[0].call_count == 1 # lstat
@mock.patch('utils.config_utils.os.path.isdir',
side_effect=lambda p: True)
@mock.patch('utils.config_utils.os.walk',
side_effect=lambda p: [
('/', [], ['file1', 'file2', 'file3.conf'])])
@mock.patch('utils.config_utils.os.path.exists',
side_effect=lambda p: True)
@mock.patch('utils.config_utils.os.path.isfile',
side_effect=lambda p: True)
@mock.patch('utils.config_utils.os.path.getsize',
side_effect=lambda p: 1000)
@mock.patch('utils.config_utils.os.lstat',
side_effect=mocked_os_lstat)
@mock.patch('utils.config_utils.codecs.open',
side_effect=mocked_codecs_open)
def test_config_host_crawler_with_discover(self, *args):
fc = ConfigHostCrawler()
configs = fc.crawl(known_config_files=['/etc/file1'],
discover_config_files=True)
print configs
assert set(configs) == set([('/file3.conf',
ConfigFeature(name='file3.conf',
content='content',
path='/file3.conf'),
'config'),
('/etc/file1',
ConfigFeature(name='file1',
content='content',
path='/etc/file1'),
'config')])
@mock.patch(
("plugins.systems.config_container_crawler."
"utils.dockerutils.exec_dockerinspect"),
side_effect=lambda long_id: {'State': {'Pid': 123}})
@mock.patch(
'plugins.systems.config_container_crawler.run_as_another_namespace',
side_effect=mocked_run_as_another_namespace)
@mock.patch('utils.config_utils.os.path.isdir',
side_effect=lambda p: True)
@mock.patch('utils.config_utils.os.path.exists',
side_effect=lambda p: True)
@mock.patch('utils.config_utils.os.lstat',
side_effect=mocked_os_lstat)
@mock.patch('utils.config_utils.codecs.open',
side_effect=mocked_codecs_open)
def test_config_container_crawler(self, *args):
fc = ConfigContainerCrawler()
for (k, f, fname) in fc.crawl(known_config_files=['/etc/file1'],
discover_config_files=False):
assert fname == "config"
assert f == ConfigFeature(name='file1', content='content',
path='/etc/file1')
assert args[0].call_count == 1 # codecs open
@mock.patch('utils.config_utils.codecs.open',
side_effect=mocked_codecs_open)
@mock.patch('utils.config_utils.os.lstat',
side_effect=mocked_os_lstat)
@mock.patch(
("plugins.systems.config_container_crawler."
"utils.dockerutils.exec_dockerinspect"),
side_effect=lambda long_id: {'State': {'Pid': 123}})
@mock.patch(
'plugins.systems.config_container_crawler.run_as_another_namespace',
side_effect=mocked_run_as_another_namespace)
@mock.patch('utils.config_utils.os.path.isdir',
side_effect=lambda p: True)
@mock.patch('utils.config_utils.os.walk',
side_effect=lambda p: [
('/', [], ['file1', 'file2', 'file3.conf'])])
@mock.patch('utils.config_utils.os.path.exists',
side_effect=lambda p: True)
@mock.patch('utils.config_utils.os.path.isfile',
side_effect=lambda p: True)
@mock.patch('utils.config_utils.os.path.getsize',
side_effect=lambda p: 1000)
def test_config_container_crawler_discover(self, *args):
fc = ConfigContainerCrawler()
configs = fc.crawl(known_config_files=['/etc/file1'],
discover_config_files=True)
assert set(configs) == set([('/file3.conf',
ConfigFeature(name='file3.conf',
content='content',
path='/file3.conf'),
'config'),
('/etc/file1',
ConfigFeature(name='file1',
content='content',
path='/etc/file1'),
'config')])
@mock.patch(
("plugins.systems.config_container_crawler."
"run_as_another_namespace"),
side_effect=mocked_run_as_another_namespace)
@mock.patch(
("plugins.systems.config_container_crawler."
"utils.dockerutils.exec_dockerinspect"),
side_effect=lambda long_id: {'State': {'Pid': 123}})
@mock.patch(
("plugins.systems.config_container_crawler.utils.dockerutils."
"get_docker_container_rootfs_path"),
side_effect=lambda long_id: '/1/2/3')
@mock.patch('utils.config_utils.os.path.isdir',
side_effect=lambda p: True)
@mock.patch('utils.config_utils.os.path.exists',
side_effect=lambda p: True)
@mock.patch('utils.config_utils.os.lstat',
side_effect=mocked_os_lstat)
@mock.patch('utils.config_utils.codecs.open',
side_effect=mocked_codecs_open)
def test_config_container_crawler_avoidsetns(self, *args):
fc = ConfigContainerCrawler()
for (k, f, fname) in fc.crawl(known_config_files=['/etc/file1'],
discover_config_files=False,
avoid_setns=True):
assert fname == "config"
assert f == ConfigFeature(name='file1', content='content',
path='/etc/file1')
assert args[0].call_count == 1 # lstat
@mock.patch(
("plugins.systems.config_container_crawler."
"run_as_another_namespace"),
side_effect=mocked_run_as_another_namespace)
@mock.patch(
("plugins.systems.config_container_crawler."
"utils.dockerutils.exec_dockerinspect"),
side_effect=lambda long_id: {'State': {'Pid': 123}})
@mock.patch(
("plugins.systems.config_container_crawler.utils.dockerutils."
"get_docker_container_rootfs_path"),
side_effect=lambda long_id: '/1/2/3')
@mock.patch('utils.config_utils.os.path.isdir',
side_effect=lambda p: True)
@mock.patch('utils.config_utils.os.walk',
side_effect=lambda p: [
('/', [], ['file1', 'file2', 'file3.conf'])])
@mock.patch('utils.config_utils.os.path.exists',
side_effect=lambda p: True)
@mock.patch('utils.config_utils.os.path.isfile',
side_effect=lambda p: True)
@mock.patch('utils.config_utils.os.path.getsize',
side_effect=lambda p: 1000)
@mock.patch('utils.config_utils.os.lstat',
side_effect=mocked_os_lstat)
@mock.patch('utils.config_utils.codecs.open',
side_effect=mocked_codecs_open)
def test_config_container_crawler_avoidsetns_discover(self, *args):
fc = ConfigContainerCrawler()
configs = fc.crawl(known_config_files=['/etc/file1'],
avoid_setns=True,
discover_config_files=True)
assert set(configs) == set([('/file3.conf',
ConfigFeature(name='file3.conf',
content='content',
path='/file3.conf'),
'config'),
('/etc/file1',
ConfigFeature(name='file1',
content='content',
path='/etc/file1'),
'config')])
@mock.patch(
'utils.package_utils.osinfo.get_osinfo',
side_effect=lambda mount_point=None: {
'os': 'ubuntu',
'version': '123'})
@mock.patch('utils.package_utils.os.path.exists',
side_effect=lambda p: True)
@mock.patch('utils.package_utils.get_dpkg_packages',
side_effect=lambda a, b, c: [('pkg1',
PackageFeature(None, 'pkg1',
123, 'v1',
'x86'))])
def test_package_host_crawler_dpkg(self, *args):
fc = PackageHostCrawler()
for (k, f, fname) in fc.crawl():
assert fname == "package"
assert f == PackageFeature(
installed=None,
pkgname='pkg1',
pkgsize=123,
pkgversion='v1',
pkgarchitecture='x86')
assert args[0].call_count == 1
args[0].assert_called_with('/', 'var/lib/dpkg', 0)
@mock.patch(
'utils.package_utils.osinfo.get_osinfo',
side_effect=lambda mount_point=None: {
'os': 'ubuntu',
'version': '123'})
@mock.patch('utils.package_utils.os.path.exists',
side_effect=lambda p: True)
@mock.patch('utils.package_utils.get_dpkg_packages',
side_effect=throw_os_error)
def test_package_host_crawler_dpkg_failure(self, *args):
fc = PackageHostCrawler()
with self.assertRaises(CrawlError):
for (k, f, fname) in fc.crawl():
pass
assert args[0].call_count == 1
args[0].assert_called_with('/', 'var/lib/dpkg', 0)
@mock.patch(
'utils.package_utils.osinfo.get_osinfo',
side_effect=lambda mount_point=None: {
'os': 'redhat',
'version': '123'})
@mock.patch('utils.package_utils.os.path.exists',
side_effect=lambda p: True)
@mock.patch('utils.package_utils.get_rpm_packages',
side_effect=lambda a, b, c, d: [('pkg1',
PackageFeature(None, 'pkg1',
123, 'v1',
'x86'))])
def test_package_host_crawler_rpm(self, *args):
fc = PackageHostCrawler()
for (k, f, fname) in fc.crawl():
assert fname == "package"
assert f == PackageFeature(
installed=None,
pkgname='pkg1',
pkgsize=123,
pkgversion='v1',
pkgarchitecture='x86')
assert args[0].call_count == 1
args[0].assert_called_with('/', 'var/lib/rpm', 0, False)
@mock.patch(
("plugins.systems.package_container_crawler."
"exec_dockerinspect"),
side_effect=lambda long_id: {'State': {'Pid': 123}})
@mock.patch(
'utils.package_utils.osinfo.get_osinfo',
side_effect=lambda mount_point=None: {
'os': 'ubuntu',
'version': '123'})
@mock.patch(
'plugins.systems.package_container_crawler.run_as_another_namespace',
side_effect=mocked_run_as_another_namespace)
@mock.patch('utils.package_utils.os.path.exists',
side_effect=lambda p: True)
@mock.patch('utils.package_utils.get_dpkg_packages',
side_effect=lambda a, b, c: [('pkg1',
PackageFeature(None, 'pkg1',
123, 'v1',
'x86'))])
def test_package_container_crawler_dpkg(self, *args):
fc = PackageContainerCrawler()
for (k, f, fname) in fc.crawl():
assert fname == "package"
assert f == PackageFeature(
installed=None,
pkgname='pkg1',
pkgsize=123,
pkgversion='v1',
pkgarchitecture='x86')
assert args[0].call_count == 1
args[0].assert_called_with('/', 'var/lib/dpkg', 0)
@mock.patch(
("plugins.systems.package_container_crawler."
"exec_dockerinspect"),
side_effect=lambda long_id: {'State': {'Pid': 123}})
@mock.patch(
'plugins.systems.package_container_crawler.run_as_another_namespace',
side_effect=mocked_run_as_another_namespace)
@mock.patch(
("plugins.systems.package_container_crawler."
"get_docker_container_rootfs_path"),
side_effect=lambda long_id: '/a/b/c')
@mock.patch(
'utils.package_utils.osinfo.get_osinfo',
side_effect=lambda mount_point=None: {
'os': 'ubuntu',
'version': '123'})
@mock.patch('utils.package_utils.os.path.exists',
side_effect=lambda p: True if 'dpkg' in p else False)
@mock.patch('utils.package_utils.get_dpkg_packages',
side_effect=throw_os_error)
def test_package_container_crawler_dpkg_failure(self, *args):
fc = PackageContainerCrawler()
with self.assertRaises(CrawlError):
for (k, f, fname) in fc.crawl():
pass
# get_dpkg_packages is called a second time after the first failure.
# first time is OUTCONTAINER mode with setns
# second time is OUTCONTAINER mode with avoid_setns
assert args[0].call_count == 2
args[0].assert_called_with('/a/b/c', 'var/lib/dpkg', 0)
args[2].assert_called_with(mount_point='/a/b/c') # get_osinfo()
@mock.patch(
("plugins.systems.package_container_crawler."
"exec_dockerinspect"),
side_effect=lambda long_id: {'State': {'Pid': 123}})
@mock.patch(
'plugins.systems.package_container_crawler.run_as_another_namespace',
side_effect=mocked_run_as_another_namespace)
@mock.patch(
("plugins.systems.package_container_crawler."
"get_docker_container_rootfs_path"),
side_effect=lambda long_id: '/a/b/c')
@mock.patch(
'utils.package_utils.osinfo.get_osinfo',
side_effect=lambda mount_point=None: {
'os': 'redhat',
'version': '123'})
@mock.patch('utils.package_utils.os.path.exists',
side_effect=lambda p: True if 'rpm' in p else False)
@mock.patch('utils.package_utils.get_rpm_packages',
side_effect=throw_os_error)
def test_package_container_crawler_rpm_failure(self, *args):
fc = PackageContainerCrawler()
with self.assertRaises(CrawlError):
for (k, f, fname) in fc.crawl():
pass
# get_rpm_packages is called a second time after the first failure.
# first time is OUTCONTAINER mode with setns
# second time is OUTCONTAINER mode with avoid_setns
assert args[0].call_count == 2
args[0].assert_called_with('/a/b/c', 'var/lib/rpm', 0, True)
args[2].assert_called_with(mount_point='/a/b/c') # get_osinfo()
@mock.patch(
("plugins.systems.package_container_crawler."
"exec_dockerinspect"),
side_effect=lambda long_id: {'State': {'Pid': 123}})
@mock.patch(
("plugins.systems.package_container_crawler."
"get_docker_container_rootfs_path"),
side_effect=lambda long_id: '/a/b/c')
@mock.patch(
'utils.package_utils.osinfo.get_osinfo',
side_effect=lambda mount_point=None: {
'os': 'ubuntu',
'version': '123'})
@mock.patch('utils.package_utils.os.path.exists',
side_effect=lambda p: True)
@mock.patch('utils.package_utils.get_dpkg_packages',
side_effect=lambda a, b, c: [('pkg1',
PackageFeature(None, 'pkg1',
123, 'v1',
'x86'))])
def test_package_container_crawler_avoidsetns(self, *args):
fc = PackageContainerCrawler()
for (k, f, fname) in fc.crawl(avoid_setns=True):
assert fname == "package"
assert f == PackageFeature(
installed=None,
pkgname='pkg1',
pkgsize=123,
pkgversion='v1',
pkgarchitecture='x86')
assert args[0].call_count == 1
@mock.patch('plugins.systems.process_host_crawler.psutil.process_iter',
side_effect=lambda: [Process('init')])
def test_process_host_crawler(self, *args):
fc = ProcessHostCrawler()
for (k, f, fname) in fc.crawl():
print f
assert fname == "process"
assert f.pname == 'init'
assert f.cmd == 'cmd'
assert f.pid == 123
assert args[0].call_count == 1
@mock.patch(
("plugins.systems.process_container_crawler.utils.dockerutils."
"exec_dockerinspect"),
side_effect=lambda long_id: {'State': {'Pid': 123}})
@mock.patch(
'plugins.systems.process_container_crawler.psutil.process_iter',
side_effect=lambda: [Process('init')])
@mock.patch(
'plugins.systems.process_container_crawler.run_as_another_namespace',
side_effect=mocked_run_as_another_namespace)
def test_process_container_crawler(self, *args):
fc = ProcessContainerCrawler()
for (k, f, fname) in fc.crawl('123'):
print f
assert fname == "process"
assert f.pname == 'init'
assert f.cmd == 'cmd'
assert f.pid == 123
assert args[0].call_count == 1
@mock.patch('plugins.systems.process_vm_crawler.psvmi.context_init',
side_effect=lambda dn1, dn2, kv, d, a: 1000)
@mock.patch('plugins.systems.process_vm_crawler.psvmi.process_iter',
side_effect=lambda vmc: [Process('init')])
@mock.patch('plugins.systems.process_vm_crawler.psvmi')
def test_process_vm_crawler(self, *args):
fc = process_vm_crawler()
for (k, f, fname) in fc.crawl(vm_desc=('dn', '2.6', 'ubuntu', 'x86')):
print f
assert fname == "process"
assert f.pname == 'init'
assert f.cmd == 'cmd'
assert f.pid == 123
assert args[1].call_count == 1 # process_iter
@mock.patch('utils.disk_utils.psutil.disk_partitions',
side_effect=mocked_disk_partitions)
@mock.patch('utils.disk_utils.psutil.disk_usage',
side_effect=lambda x: pdiskusage(10, 100))
def test_crawl_disk_partitions_invm_mode(self, *args):
fc = DiskHostCrawler()
disks = fc.crawl()
assert set(disks) == set([('/a',
DiskFeature(partitionname='/dev/a',
freepct=90.0,
fstype='type',
mountpt='/a',
mountopts='opts',
partitionsize=100),
'disk'),
('/b',
DiskFeature(partitionname='/dev/b',
freepct=90.0,
fstype='type',
mountpt='/b',
mountopts='opts',
partitionsize=100),
'disk')])
@mock.patch(
'plugins.systems.disk_container_crawler.run_as_another_namespace',
side_effect=mocked_run_as_another_namespace)
@mock.patch('utils.disk_utils.psutil.disk_partitions',
side_effect=mocked_disk_partitions)
@mock.patch('utils.disk_utils.psutil.disk_usage',
side_effect=lambda x: pdiskusage(10, 100))
@mock.patch(
("plugins.systems.disk_container_crawler.utils.dockerutils."
"exec_dockerinspect"),
side_effect=lambda long_id: {'State': {'Pid': 123}})
def test_crawl_disk_partitions_outcontainer_mode(self, *args):
fc = DiskContainerCrawler()
disks = fc.crawl('123')
assert set(disks) == set([('/a',
DiskFeature(partitionname='/dev/a',
freepct=90.0,
fstype='type',
mountpt='/a',
mountopts='opts',
partitionsize=100),
'disk'),
('/b',
DiskFeature(partitionname='/dev/b',
freepct=90.0,
fstype='type',
mountpt='/b',
mountopts='opts',
partitionsize=100),
'disk')])
@mock.patch('utils.metric_utils.psutil.process_iter',
side_effect=lambda: [Process('init')])
def test_crawl_metrics_invm_mode(self, *args):
fc = MetricHostCrawler()
for (k, f, t) in fc.crawl():
assert f.cpupct == 30.0
assert f.mempct == 30.0
assert f.pname == 'init'
assert f.pid == 123
assert f.rss == 10
assert f.status == 'Running'
assert f.vms == 20
assert f.read == 10
assert f.write == 20
assert args[0].call_count == 1
@mock.patch('utils.metric_utils.psutil.process_iter',
side_effect=lambda: [Process('init')])
@mock.patch('utils.metric_utils.round',
side_effect=throw_os_error)
def test_crawl_metrics_invm_mode_failure(self, *args):
with self.assertRaises(OSError):
fc = MetricHostCrawler()
for ff in fc.crawl():
pass
assert args[0].call_count == 1
@mock.patch('utils.metric_utils.psutil.process_iter',
side_effect=lambda: [Process('init')])
@mock.patch(
'plugins.systems.metric_container_crawler.run_as_another_namespace',
side_effect=mocked_run_as_another_namespace)
@mock.patch(
("plugins.systems.disk_container_crawler.utils.dockerutils."
"exec_dockerinspect"),
side_effect=lambda long_id: {'State': {'Pid': 123}})
def test_crawl_metrics_outcontainer_mode(self, *args):
fc = MetricContainerCrawler()
for (k, f, t) in fc.crawl('123'):
assert f.cpupct == 30.0
assert f.mempct == 30.0
assert f.pname == 'init'
assert f.pid == 123
assert f.rss == 10
assert f.status == 'Running'
assert f.vms == 20
assert f.read == 10
assert f.write == 20
assert args[0].call_count == 1
@mock.patch('plugins.systems.metric_vm_crawler.psvmi.context_init',
side_effect=lambda dn1, dn2, kv, d, a: 1000)
@mock.patch('plugins.systems.metric_vm_crawler.psvmi.process_iter',
side_effect=lambda vmc: [Process('init')])
@mock.patch(
("plugins.systems.metric_vm_crawler."
"MetricVmCrawler._crawl_metrics_cpu_percent"),
side_effect=lambda proc: 30.0)
@mock.patch('plugins.systems.metric_vm_crawler.psvmi')
def test_crawl_metrics_vm_mode(self, *args):
fc = MetricVmCrawler()
for (k, f, t) in fc.crawl(vm_desc=('dn', '2.6', 'ubuntu', 'x86')):
assert f.cpupct == 30.0
assert f.mempct == 30.0
assert f.pname == 'init'
assert f.pid == 123
assert f.rss == 10
assert f.status == 'Running'
assert f.vms == 20
assert f.read == 10
assert f.write == 20
assert args[1].call_count == 1 # process_iter
@mock.patch('utils.connection_utils.psutil.process_iter',
side_effect=lambda: [Process('init')])
def test_crawl_connections_invm_mode(self, *args):
fc = ConnectionHostCrawler()
for (k, f, t) in fc.crawl():
assert f.localipaddr == '1.1.1.1'
assert f.remoteipaddr == '2.2.2.2'
assert f.localport == '22'
assert f.remoteport == '22'
assert args[0].call_count == 1
@mock.patch('utils.connection_utils.psutil.process_iter',
side_effect=lambda: [Process('init')])
@mock.patch(
'plugins.systems.connection_container_crawler.run_as_another_namespace',
side_effect=mocked_run_as_another_namespace)
@mock.patch(
("plugins.systems.connection_container_crawler.utils.dockerutils."
"exec_dockerinspect"),
side_effect=lambda long_id: {'State': {'Pid': 123}})
def test_crawl_connections_outcontainer_mode(self, *args):
fc = ConnectionContainerCrawler()
for (k, f, t) in fc.crawl('123'):
assert f.localipaddr == '1.1.1.1'
assert f.remoteipaddr == '2.2.2.2'
assert f.localport == '22'
assert f.remoteport == '22'
assert args[0].call_count == 1
@mock.patch('plugins.systems.connection_vm_crawler.psvmi.context_init',
side_effect=lambda dn1, dn2, kv, d, a: 1000)
@mock.patch('plugins.systems.connection_vm_crawler.psvmi.process_iter',
side_effect=lambda vmc: [Process('init')])
@mock.patch('plugins.systems.connection_vm_crawler.psvmi')
def test_crawl_connections_outvm_mode(self, *args):
fc = ConnectionVmCrawler()
for (k, f, t) in fc.crawl(vm_desc=('dn', '2.6', 'ubuntu', 'x86')):
assert f.localipaddr == '1.1.1.1'
assert f.remoteipaddr == '2.2.2.2'
assert f.localport == '22'
assert f.remoteport == '22'
assert args[1].call_count == 1
@mock.patch('plugins.systems.memory_host_crawler.psutil.virtual_memory',
side_effect=lambda: psutils_memory(2, 2, 3, 4))
def test_crawl_memory_invm_mode(self, *args):
fc = MemoryHostCrawler()
for (k, f, t) in fc.crawl():
assert f == MemoryFeature(
memory_used=2,
memory_buffered=3,
memory_cached=4,
memory_free=2,
memory_util_percentage=50)
assert args[0].call_count == 1
@mock.patch('plugins.systems.memory_host_crawler.psutil.virtual_memory',
side_effect=throw_os_error)
def test_crawl_memory_invm_mode_failure(self, *args):
fc = MemoryHostCrawler()
with self.assertRaises(OSError):
for (k, f, t) in fc.crawl():
pass
assert args[0].call_count == 1
@mock.patch('plugins.systems.memory_vm_crawler.psvmi.context_init',
side_effect=lambda dn1, dn2, kv, d, a: 1000)
@mock.patch('plugins.systems.memory_vm_crawler.psvmi.system_memory_info',
side_effect=lambda vmc: psvmi_memory(10, 20, 30, 40))
@mock.patch('plugins.systems.memory_vm_crawler.psvmi')
def test_crawl_memory_outvm_mode(self, *args):
fc = MemoryVmCrawler()
for (k, f, t) in fc.crawl(vm_desc=('dn', '2.6', 'ubuntu', 'x86')):
assert f == MemoryFeature(
memory_used=10,
memory_buffered=20,
memory_cached=30,
memory_free=40,
memory_util_percentage=20)
assert args[1].call_count == 1
@mock.patch(
'plugins.systems.memory_container_crawler.psutil.virtual_memory',
side_effect=lambda: psutils_memory(
10,
10,
3,
10))
@mock.patch('plugins.systems.memory_container_crawler.open',
side_effect=mocked_memory_cgroup_open)
@mock.patch('plugins.systems.memory_container_crawler.DockerContainer',
side_effect=lambda container_id: DummyContainer(container_id))
def test_crawl_memory_outcontainer_mode(self, *args):
fc = MemoryContainerCrawler()
for (k, f, t) in fc.crawl('123'):
assert f == MemoryFeature(
memory_used=2,
memory_buffered=200,
memory_cached=100,
memory_free=0,
memory_util_percentage=100)
assert args[1].call_count == 3 # 3 cgroup files
@mock.patch(
'plugins.systems.memory_container_crawler.psutil.virtual_memory',
side_effect=lambda: psutils_memory(
10,
10,
3,
10))
@mock.patch('plugins.systems.memory_container_crawler.open',
side_effect=throw_os_error)
@mock.patch('plugins.systems.memory_container_crawler.DockerContainer',
side_effect=lambda container_id: DummyContainer(container_id))
def test_crawl_memory_outcontainer_mode_failure(self, *args):
fc = MemoryContainerCrawler()
with self.assertRaises(OSError):
for (k, f, t) in fc.crawl('123'):
pass
        assert args[1].call_count == 1  # 1 cgroup file
@mock.patch(
'plugins.systems.cpu_host_crawler.psutil.cpu_times_percent',
side_effect=lambda percpu: [
psutils_cpu(
10,
20,
30,
40,
50,
60,
70)])
def test_crawl_cpu_invm_mode(self, *args):
fc = CpuHostCrawler()
for (k, f, t) in fc.crawl():
assert f == CpuFeature(
cpu_idle=10,
cpu_nice=20,
cpu_user=30,
cpu_wait=40,
cpu_system=50,
cpu_interrupt=60,
cpu_steal=70,
cpu_util=90)
assert args[0].call_count == 1
@mock.patch('plugins.systems.cpu_host_crawler.psutil.cpu_times_percent',
side_effect=throw_os_error)
def test_crawl_cpu_invm_mode_failure(self, *args):
fc = CpuHostCrawler()
with self.assertRaises(OSError):
for (k, f, t) in fc.crawl():
pass
assert args[0].call_count == 1
@mock.patch(
'plugins.systems.cpu_container_crawler.psutil.cpu_times_percent',
side_effect=lambda percpu: [
psutils_cpu(
10,
20,
30,
40,
50,
60,
70)])
@mock.patch('plugins.systems.cpu_container_crawler.time.sleep')
@mock.patch('plugins.systems.cpu_container_crawler.open',
side_effect=mocked_cpu_cgroup_open)
@mock.patch('plugins.systems.cpu_container_crawler.DockerContainer',
side_effect=lambda container_id: DummyContainer(container_id))
def test_crawl_cpu_outcontainer_mode(self, *args):
fc = CpuContainerCrawler()
for (k, f, t) in fc.crawl('123'):
assert f == CpuFeature(
cpu_idle=90.0,
cpu_nice=20,
cpu_user=5.0,
cpu_wait=40,
cpu_system=5.0,
cpu_interrupt=60,
cpu_steal=70,
cpu_util=10.0)
assert args[1].call_count == 3 # open for 3 cgroup files
@mock.patch(
'plugins.systems.cpu_container_crawler.psutil.cpu_times_percent',
side_effect=lambda percpu: [
psutils_cpu(
10,
20,
30,
40,
50,
60,
70)])
@mock.patch('plugins.systems.cpu_container_crawler.time.sleep')
@mock.patch('plugins.systems.cpu_container_crawler.open',
side_effect=throw_os_error)
@mock.patch('plugins.systems.cpu_container_crawler.DockerContainer',
side_effect=lambda container_id: DummyContainer(container_id))
def test_crawl_cpu_outcontainer_mode_failure(self, *args):
fc = CpuContainerCrawler()
with self.assertRaises(OSError):
for (k, f, t) in fc.crawl('123'):
pass
assert args[0].call_count == 1
@mock.patch(
'plugins.systems.interface_host_crawler.psutil.net_io_counters',
side_effect=lambda pernic: {'interface1-unit-tests':
psutils_net(
10,
20,
30,
40,
50,
60)})
def test_crawl_interface_invm_mode(self, *args):
fc = InterfaceHostCrawler()
for (k, f, t) in fc.crawl():
assert f == InterfaceFeature(
if_octets_tx=0,
if_octets_rx=0,
if_packets_tx=0,
if_packets_rx=0,
if_errors_tx=0,
if_errors_rx=0)
for (k, f, t) in fc.crawl():
assert f == InterfaceFeature(
if_octets_tx=0,
if_octets_rx=0,
if_packets_tx=0,
if_packets_rx=0,
if_errors_tx=0,
if_errors_rx=0)
assert args[0].call_count == 2
@mock.patch(
'plugins.systems.interface_host_crawler.psutil.net_io_counters',
side_effect=throw_os_error)
def test_crawl_interface_invm_mode_failure(self, *args):
fc = InterfaceHostCrawler()
with self.assertRaises(OSError):
for (k, f, t) in fc.crawl():
pass
# Each crawl in crawlutils.py instantiates a FeaturesCrawler object
with self.assertRaises(OSError):
for (k, f, t) in fc.crawl():
pass
assert args[0].call_count == 2
@mock.patch('plugins.systems.interface_container_crawler.DockerContainer',
side_effect=lambda container_id: DummyContainer(container_id))
@mock.patch(
'plugins.systems.interface_container_crawler.run_as_another_namespace',
side_effect=mocked_run_as_another_namespace)
@mock.patch(
'plugins.systems.interface_container_crawler.psutil.net_io_counters',
side_effect=lambda pernic: {'eth0':
psutils_net(
10,
20,
30,
40,
50,
60)})
def test_crawl_interface_outcontainer_mode(self, *args):
fc = InterfaceContainerCrawler()
for (k, f, t) in fc.crawl('123'):
assert f == InterfaceFeature(
if_octets_tx=0,
if_octets_rx=0,
if_packets_tx=0,
if_packets_rx=0,
if_errors_tx=0,
if_errors_rx=0)
for (k, f, t) in fc.crawl('123'):
assert f == InterfaceFeature(
if_octets_tx=0,
if_octets_rx=0,
if_packets_tx=0,
if_packets_rx=0,
if_errors_tx=0,
if_errors_rx=0)
assert args[0].call_count == 2
assert args[1].call_count == 2
@mock.patch('plugins.systems.interface_vm_crawler.psvmi.context_init',
side_effect=lambda dn1, dn2, kv, d, a: 1000)
@mock.patch('plugins.systems.interface_vm_crawler.psvmi.interface_iter',
side_effect=lambda vmc: [psvmi_interface(
'eth1', 10, 20, 30, 40, 50, 60)])
@mock.patch('plugins.systems.interface_vm_crawler.psvmi')
def test_crawl_interface_outvm_mode(self, *args):
fc = InterfaceVmCrawler()
for (k, f, t) in fc.crawl(vm_desc=('dn', '2.6', 'ubuntu', 'x86')):
assert f == InterfaceFeature(
if_octets_tx=0,
if_octets_rx=0,
if_packets_tx=0,
if_packets_rx=0,
if_errors_tx=0,
if_errors_rx=0)
for (k, f, t) in fc.crawl(vm_desc=('dn', '2.6', 'ubuntu', 'x86')):
assert f == InterfaceFeature(
if_octets_tx=0,
if_octets_rx=0,
if_packets_tx=0,
if_packets_rx=0,
if_errors_tx=0,
if_errors_rx=0)
assert args[1].call_count == 2
assert args[2].call_count == 2
@mock.patch('plugins.systems.load_host_crawler.os.getloadavg',
side_effect=lambda: [1, 2, 3])
def test_crawl_load_invm_mode(self, *args):
fc = LoadHostCrawler()
for (k, f, t) in fc.crawl():
assert f == LoadFeature(shortterm=1, midterm=2, longterm=2)
assert args[0].call_count == 1
@mock.patch('plugins.systems.load_host_crawler.os.getloadavg',
side_effect=throw_os_error)
def test_crawl_load_invm_mode_failure(self, *args):
fc = LoadHostCrawler()
with self.assertRaises(OSError):
for (k, f, t) in fc.crawl():
pass
assert args[0].call_count == 1
@mock.patch(
'plugins.systems.load_container_crawler.run_as_another_namespace',
side_effect=mocked_run_as_another_namespace)
@mock.patch('plugins.systems.load_container_crawler.os.getloadavg',
side_effect=lambda: [1, 2, 3])
@mock.patch('plugins.systems.load_container_crawler.DockerContainer',
side_effect=lambda container_id: DummyContainer(container_id))
def test_crawl_load_outcontainer_mode(self, *args):
fc = LoadContainerCrawler()
for (k, f, t) in fc.crawl('123'):
assert f == LoadFeature(shortterm=1, midterm=2, longterm=2)
assert args[1].call_count == 1
assert args[2].call_count == 1
@mock.patch('plugins.systems.dockerps_host_crawler.exec_dockerps',
side_effect=lambda: [{'State': {'Running': True},
'Image': 'reg/image:latest',
'Config': {'Cmd': 'command'},
'Name': 'name',
'Id': 'id'}])
def test_crawl_dockerps_invm_mode(self, *args):
fc = DockerpsHostCrawler()
for (k, f, t) in fc.crawl():
assert f == DockerPSFeature(
Status=True,
Created=0,
Image='reg/image:latest',
Ports=[],
Command='command',
Names='name',
Id='id')
assert args[0].call_count == 1
@mock.patch('plugins.systems.dockerps_host_crawler.exec_dockerps',
side_effect=throw_os_error)
def test_crawl_dockerps_invm_mode_failure(self, *args):
fc = DockerpsHostCrawler()
with self.assertRaises(OSError):
for (k, f, t) in fc.crawl():
pass
assert args[0].call_count == 1
@mock.patch('plugins.systems.dockerhistory_container_crawler.exec_docker_history',
side_effect=lambda long_id: [
{'Id': 'image1', 'random': 'abc'},
{'Id': 'image2', 'random': 'abc'}])
def test_crawl_dockerhistory_outcontainer_mode(self, *args):
fc = DockerhistoryContainerCrawler()
for (k, f, t) in fc.crawl('123'):
assert f == {'history': [{'Id': 'image1', 'random': 'abc'},
{'Id': 'image2', 'random': 'abc'}]}
assert args[0].call_count == 1
@mock.patch(
'plugins.systems.dockerhistory_container_crawler.exec_docker_history',
side_effect=throw_os_error)
def test_crawl_dockerhistory_outcontainer_mode_failure(self, *args):
fc = DockerhistoryContainerCrawler()
with self.assertRaises(OSError):
for (k, f, t) in fc.crawl('123'):
pass
assert args[0].call_count == 1
@mock.patch(
'plugins.systems.dockerinspect_container_crawler.exec_dockerinspect',
side_effect=lambda long_id: {
'Id': 'image1',
'random': 'abc'})
def test_crawl_dockerinspect_outcontainer_mode(self, *args):
fc = DockerinspectContainerCrawler()
for (k, f, t) in fc.crawl('123'):
assert f == {'Id': 'image1', 'random': 'abc'}
assert args[0].call_count == 1
@mock.patch(
'plugins.systems.dockerinspect_container_crawler.exec_dockerinspect',
side_effect=throw_os_error)
def test_crawl_dockerinspect_outcontainer_mode_failure(self, *args):
fc = DockerinspectContainerCrawler()
with self.assertRaises(OSError):
for (k, f, t) in fc.crawl('123'):
pass
assert args[0].call_count == 1
| sastryduri/agentless-system-crawler | tests/unit/test_plugins.py | Python | apache-2.0 | 69,027 |
#########
# Copyright (c) 2019 Cloudify Platform Ltd. All rights reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from flask_security.utils import hash_password
from cloudify.cluster_status import (
DB_STATUS_REPORTER,
BROKER_STATUS_REPORTER,
MANAGER_STATUS_REPORTER,
MANAGER_STATUS_REPORTER_ID,
BROKER_STATUS_REPORTER_ID,
DB_STATUS_REPORTER_ID
)
from manager_rest.storage.models import Tenant, UserTenantAssoc
from manager_rest.storage import user_datastore
from manager_rest.constants import (
DEFAULT_TENANT_ID,
DEFAULT_TENANT_ROLE,
)
ADMIN_ROLE = 'sys_admin'
USER_ROLE = 'default'
USER_IN_TENANT_ROLE = 'user'
def get_admin_user():
return {
'username': 'admin',
'password': 'admin',
'role': ADMIN_ROLE
}
def get_status_reporters():
return [
{
'username': MANAGER_STATUS_REPORTER,
'password': 'password',
'role': MANAGER_STATUS_REPORTER,
'id': MANAGER_STATUS_REPORTER_ID
},
{
'username': BROKER_STATUS_REPORTER,
'password': 'password',
'role': BROKER_STATUS_REPORTER,
'id': BROKER_STATUS_REPORTER_ID
},
{
'username': DB_STATUS_REPORTER,
'password': 'password',
'role': DB_STATUS_REPORTER,
'id': DB_STATUS_REPORTER_ID
},
]
def get_test_users():
test_users = [
{
'username': 'alice',
'password': 'alice_password',
'role': ADMIN_ROLE
},
{
'username': 'bob',
'password': 'bob_password',
'role': USER_ROLE
},
{
'username': 'clair',
'password': 'clair_password',
'role': USER_ROLE,
'active': False
},
{
'username': 'dave',
'password': 'dave_password',
'role': USER_ROLE
}
]
return test_users
def add_users_to_db(user_list):
default_tenant = Tenant.query.get(DEFAULT_TENANT_ID)
for user in user_list:
role = user_datastore.find_role(user['role'])
user_obj = user_datastore.create_user(
username=user['username'],
password=hash_password(user['password']),
roles=[role]
)
default_tenant_role = user_datastore.find_role(DEFAULT_TENANT_ROLE)
user_obj.active = user.get('active', True)
user_tenant_association = UserTenantAssoc(
user=user_obj,
tenant=default_tenant,
role=default_tenant_role,
)
user_obj.tenant_associations.append(user_tenant_association)
user_datastore.commit()
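# Illustrative usage (a sketch, not part of the original module): this assumes
# an initialised manager_rest application/storage context, which the
# surrounding test framework is expected to provide.
#
#   add_users_to_db(get_test_users())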
| cloudify-cosmo/cloudify-manager | rest-service/manager_rest/test/security_utils.py | Python | apache-2.0 | 3,245 |
# This work was created by participants in the DataONE project, and is
# jointly copyrighted by participating institutions in DataONE. For
# more information on DataONE, see our web site at http://dataone.org.
#
# Copyright 2009-2019 DataONE
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""ONEDrive resolvers."""
| DataONEorg/d1_python | client_onedrive/src/d1_onedrive/impl/resolver/__init__.py | Python | apache-2.0 | 814 |
from __future__ import absolute_import
import logging
import struct
import six
from six.moves import xrange
import kafka.common
import kafka.protocol.commit
import kafka.protocol.fetch
import kafka.protocol.message
import kafka.protocol.metadata
import kafka.protocol.offset
import kafka.protocol.produce
from kafka.codec import (
gzip_encode, gzip_decode, snappy_encode, snappy_decode
)
from kafka.common import (
ProtocolError, ChecksumError,
UnsupportedCodecError,
ConsumerMetadataResponse
)
from kafka.util import (
crc32, read_short_string, read_int_string, relative_unpack,
write_short_string, write_int_string, group_by_topic_and_partition
)
log = logging.getLogger(__name__)
ATTRIBUTE_CODEC_MASK = 0x03
CODEC_NONE = 0x00
CODEC_GZIP = 0x01
CODEC_SNAPPY = 0x02
ALL_CODECS = (CODEC_NONE, CODEC_GZIP, CODEC_SNAPPY)
class KafkaProtocol(object):
"""
Class to encapsulate all of the protocol encoding/decoding.
    This class does not have any state associated with it; it is purely
    for organization.
"""
PRODUCE_KEY = 0
FETCH_KEY = 1
OFFSET_KEY = 2
METADATA_KEY = 3
OFFSET_COMMIT_KEY = 8
OFFSET_FETCH_KEY = 9
CONSUMER_METADATA_KEY = 10
###################
# Private API #
###################
@classmethod
def _encode_message_header(cls, client_id, correlation_id, request_key,
version=0):
"""
Encode the common request envelope
"""
return struct.pack('>hhih%ds' % len(client_id),
request_key, # ApiKey
version, # ApiVersion
correlation_id, # CorrelationId
len(client_id), # ClientId size
client_id) # ClientId
@classmethod
def _encode_message_set(cls, messages):
"""
Encode a MessageSet. Unlike other arrays in the protocol,
MessageSets are not length-prefixed
Format
======
MessageSet => [Offset MessageSize Message]
Offset => int64
MessageSize => int32
"""
message_set = []
for message in messages:
encoded_message = KafkaProtocol._encode_message(message)
message_set.append(struct.pack('>qi%ds' % len(encoded_message), 0,
len(encoded_message),
encoded_message))
return b''.join(message_set)
@classmethod
def _encode_message(cls, message):
"""
Encode a single message.
The magic number of a message is a format version number.
The only supported magic number right now is zero
Format
======
Message => Crc MagicByte Attributes Key Value
Crc => int32
MagicByte => int8
Attributes => int8
Key => bytes
Value => bytes
"""
if message.magic == 0:
msg = b''.join([
struct.pack('>BB', message.magic, message.attributes),
write_int_string(message.key),
write_int_string(message.value)
])
crc = crc32(msg)
msg = struct.pack('>i%ds' % len(msg), crc, msg)
else:
raise ProtocolError("Unexpected magic number: %d" % message.magic)
return msg
##################
# Public API #
##################
@classmethod
def encode_produce_request(cls, payloads=(), acks=1, timeout=1000):
"""
Encode a ProduceRequest struct
Arguments:
payloads: list of ProduceRequestPayload
acks: How "acky" you want the request to be
1: written to disk by the leader
0: immediate response
-1: waits for all replicas to be in sync
timeout: Maximum time (in ms) the server will wait for replica acks.
This is _not_ a socket timeout
Returns: ProduceRequest
"""
if acks not in (1, 0, -1):
raise ValueError('ProduceRequest acks (%s) must be 1, 0, -1' % acks)
return kafka.protocol.produce.ProduceRequest(
required_acks=acks,
timeout=timeout,
topics=[(
topic,
[(
partition,
[(0, 0, kafka.protocol.message.Message(msg.value, key=msg.key,
magic=msg.magic,
attributes=msg.attributes))
for msg in payload.messages])
for partition, payload in topic_payloads.items()])
for topic, topic_payloads in group_by_topic_and_partition(payloads).items()])
@classmethod
def decode_produce_response(cls, response):
"""
Decode ProduceResponse to ProduceResponsePayload
Arguments:
response: ProduceResponse
Return: list of ProduceResponsePayload
"""
return [
kafka.common.ProduceResponsePayload(topic, partition, error, offset)
for topic, partitions in response.topics
for partition, error, offset in partitions
]
@classmethod
def encode_fetch_request(cls, payloads=(), max_wait_time=100, min_bytes=4096):
"""
Encodes a FetchRequest struct
Arguments:
payloads: list of FetchRequestPayload
max_wait_time (int, optional): ms to block waiting for min_bytes
data. Defaults to 100.
min_bytes (int, optional): minimum bytes required to return before
max_wait_time. Defaults to 4096.
Return: FetchRequest
"""
return kafka.protocol.fetch.FetchRequest(
replica_id=-1,
max_wait_time=max_wait_time,
min_bytes=min_bytes,
topics=[(
topic,
[(
partition,
payload.offset,
payload.max_bytes)
for partition, payload in topic_payloads.items()])
for topic, topic_payloads in group_by_topic_and_partition(payloads).items()])
@classmethod
def decode_fetch_response(cls, response):
"""
Decode FetchResponse struct to FetchResponsePayloads
Arguments:
response: FetchResponse
"""
return [
kafka.common.FetchResponsePayload(
topic, partition, error, highwater_offset, [
kafka.common.OffsetAndMessage(offset, message)
for offset, _, message in messages])
for topic, partitions in response.topics
for partition, error, highwater_offset, messages in partitions
]
@classmethod
def encode_offset_request(cls, payloads=()):
return kafka.protocol.offset.OffsetRequest(
replica_id=-1,
topics=[(
topic,
[(
partition,
payload.time,
payload.max_offsets)
for partition, payload in six.iteritems(topic_payloads)])
for topic, topic_payloads in six.iteritems(group_by_topic_and_partition(payloads))])
@classmethod
def decode_offset_response(cls, response):
"""
Decode OffsetResponse into OffsetResponsePayloads
Arguments:
response: OffsetResponse
Returns: list of OffsetResponsePayloads
"""
return [
kafka.common.OffsetResponsePayload(topic, partition, error, tuple(offsets))
for topic, partitions in response.topics
for partition, error, offsets in partitions
]
@classmethod
def encode_metadata_request(cls, topics=(), payloads=None):
"""
Encode a MetadataRequest
Arguments:
topics: list of strings
"""
if payloads is not None:
topics = payloads
return kafka.protocol.metadata.MetadataRequest(topics)
@classmethod
def decode_metadata_response(cls, response):
return response
@classmethod
def encode_consumer_metadata_request(cls, client_id, correlation_id, payloads):
"""
Encode a ConsumerMetadataRequest
Arguments:
client_id: string
correlation_id: int
payloads: string (consumer group)
"""
message = []
message.append(cls._encode_message_header(client_id, correlation_id,
KafkaProtocol.CONSUMER_METADATA_KEY))
message.append(struct.pack('>h%ds' % len(payloads), len(payloads), payloads))
msg = b''.join(message)
return write_int_string(msg)
@classmethod
def decode_consumer_metadata_response(cls, data):
"""
Decode bytes to a ConsumerMetadataResponse
Arguments:
data: bytes to decode
"""
((correlation_id, error, nodeId), cur) = relative_unpack('>ihi', data, 0)
(host, cur) = read_short_string(data, cur)
((port,), cur) = relative_unpack('>i', data, cur)
return ConsumerMetadataResponse(error, nodeId, host, port)
@classmethod
def encode_offset_commit_request(cls, group, payloads):
"""
Encode an OffsetCommitRequest struct
Arguments:
group: string, the consumer group you are committing offsets for
payloads: list of OffsetCommitRequestPayload
"""
return kafka.protocol.commit.OffsetCommitRequest_v0(
consumer_group=group,
topics=[(
topic,
[(
partition,
payload.offset,
payload.metadata)
for partition, payload in six.iteritems(topic_payloads)])
for topic, topic_payloads in six.iteritems(group_by_topic_and_partition(payloads))])
@classmethod
def decode_offset_commit_response(cls, response):
"""
Decode OffsetCommitResponse to an OffsetCommitResponsePayload
Arguments:
response: OffsetCommitResponse
"""
return [
kafka.common.OffsetCommitResponsePayload(topic, partition, error)
for topic, partitions in response.topics
for partition, error in partitions
]
@classmethod
def encode_offset_fetch_request(cls, group, payloads, from_kafka=False):
"""
Encode an OffsetFetchRequest struct. The request is encoded using
version 0 if from_kafka is false, indicating a request for Zookeeper
offsets. It is encoded using version 1 otherwise, indicating a request
for Kafka offsets.
Arguments:
group: string, the consumer group you are fetching offsets for
payloads: list of OffsetFetchRequestPayload
from_kafka: bool, default False, set True for Kafka-committed offsets
"""
if from_kafka:
request_class = kafka.protocol.commit.OffsetFetchRequest_v1
else:
request_class = kafka.protocol.commit.OffsetFetchRequest_v0
return request_class(
consumer_group=group,
topics=[(
topic,
list(topic_payloads.keys()))
for topic, topic_payloads in six.iteritems(group_by_topic_and_partition(payloads))])
@classmethod
def decode_offset_fetch_response(cls, response):
"""
Decode OffsetFetchResponse to OffsetFetchResponsePayloads
Arguments:
response: OffsetFetchResponse
"""
return [
kafka.common.OffsetFetchResponsePayload(
topic, partition, offset, metadata, error
)
for topic, partitions in response.topics
for partition, offset, metadata, error in partitions
]
def create_message(payload, key=None):
"""
Construct a Message
Arguments:
payload: bytes, the payload to send to Kafka
key: bytes, a key used for partition routing (optional)
"""
return kafka.common.Message(0, 0, key, payload)
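# Illustrative usage (a sketch only; the topic name and partition number are
# made up, and the (topic, partition, messages) field order of
# ProduceRequestPayload is assumed rather than taken from this module):
#
#   msg = create_message(b'some payload', key=b'routing-key')
#   payload = kafka.common.ProduceRequestPayload('my-topic', 0, [msg])
#   request = KafkaProtocol.encode_produce_request(payloads=[payload])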
def create_gzip_message(payloads, key=None, compresslevel=None):
"""
Construct a Gzipped Message containing multiple Messages
The given payloads will be encoded, compressed, and sent as a single atomic
message to Kafka.
Arguments:
        payloads: list(bytes), a list of payloads to be sent to Kafka
key: bytes, a key used for partition routing (optional)
"""
message_set = KafkaProtocol._encode_message_set(
[create_message(payload, pl_key) for payload, pl_key in payloads])
gzipped = gzip_encode(message_set, compresslevel=compresslevel)
codec = ATTRIBUTE_CODEC_MASK & CODEC_GZIP
return kafka.common.Message(0, 0x00 | codec, key, gzipped)
def create_snappy_message(payloads, key=None):
"""
Construct a Snappy Message containing multiple Messages
The given payloads will be encoded, compressed, and sent as a single atomic
message to Kafka.
Arguments:
        payloads: list(bytes), a list of payloads to be sent to Kafka
key: bytes, a key used for partition routing (optional)
"""
message_set = KafkaProtocol._encode_message_set(
[create_message(payload, pl_key) for payload, pl_key in payloads])
snapped = snappy_encode(message_set)
codec = ATTRIBUTE_CODEC_MASK & CODEC_SNAPPY
return kafka.common.Message(0, 0x00 | codec, key, snapped)
def create_message_set(messages, codec=CODEC_NONE, key=None, compresslevel=None):
"""Create a message set using the given codec.
If codec is CODEC_NONE, return a list of raw Kafka messages. Otherwise,
return a list containing a single codec-encoded message.
"""
if codec == CODEC_NONE:
return [create_message(m, k) for m, k in messages]
elif codec == CODEC_GZIP:
return [create_gzip_message(messages, key, compresslevel)]
elif codec == CODEC_SNAPPY:
return [create_snappy_message(messages, key)]
else:
raise UnsupportedCodecError("Codec 0x%02x unsupported" % codec)
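# Illustrative usage (a sketch): messages are (value, key) tuples of bytes;
# passing CODEC_GZIP wraps them all in one compressed Kafka message.
#
#   plain = create_message_set([(b'v1', b'k1'), (b'v2', None)])
#   packed = create_message_set([(b'v1', b'k1'), (b'v2', None)], codec=CODEC_GZIP)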
| gamechanger/kafka-python | kafka/protocol/legacy.py | Python | apache-2.0 | 14,397 |
# Copyright 2012 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Copyright 2012 Nebula, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import datetime
from django.test.utils import override_settings
from django.urls import reverse
from django.utils import encoding
from django.utils import timezone
from horizon.templatetags import sizeformat
from openstack_dashboard import api
from openstack_dashboard.test import helpers as test
from openstack_dashboard import usage
INDEX_URL = reverse('horizon:project:overview:index')
class UsageViewTests(test.BaseAdminViewTests):
@override_settings(OVERVIEW_DAYS_RANGE=None)
def test_usage(self):
self._test_usage(nova_stu_enabled=True, overview_days_range=None)
def test_usage_1_day(self):
self._test_usage(nova_stu_enabled=True)
@override_settings(
OVERVIEW_DAYS_RANGE=None,
OPENSTACK_USE_SIMPLE_TENANT_USAGE=False,
)
def test_usage_disabled(self):
self._test_usage(nova_stu_enabled=False, overview_days_range=None)
def test_usage_with_deleted_tenant(self):
self._test_usage(tenant_deleted=True)
def _get_start_end_range(self, overview_days_range):
now = timezone.now()
if overview_days_range:
start_day = now - datetime.timedelta(days=overview_days_range)
else:
start_day = datetime.date(now.year, now.month, 1)
return start_day, now
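    # Illustrative values (assumed dates): with overview_days_range=7 and "now"
    # on 2019-03-10, start_day is 2019-03-03; with overview_days_range=None it
    # is 2019-03-01, i.e. the first day of the current month.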
@test.create_mocks({api.nova: ('usage_list',),
api.keystone: ('tenant_list',)})
def _test_usage(self, nova_stu_enabled=True, tenant_deleted=False,
overview_days_range=1):
usage_list = [api.nova.NovaUsage(u) for u in self.usages.list()]
if tenant_deleted:
self.mock_tenant_list.return_value = [[self.tenants.first()],
False]
else:
self.mock_tenant_list.return_value = [self.tenants.list(), False]
self.mock_usage_list.return_value = usage_list
res = self.client.get(reverse('horizon:admin:overview:index'))
self.assertTemplateUsed(res, 'admin/overview/usage.html')
self.assertIsInstance(res.context['usage'], usage.GlobalUsage)
self.assertEqual(nova_stu_enabled,
res.context['simple_tenant_usage_enabled'])
usage_table = encoding.smart_str('''
<tr class="" data-object-id="1" id="global_usage__row__1">
<td class="sortable normal_column">test_tenant</td>
<td class="sortable normal_column">%s</td>
<td class="sortable normal_column">%s</td>
<td class="sortable normal_column">%s</td>
<td class="sortable normal_column">%.2f</td>
<td class="sortable normal_column">%.2f</td>
<td class="sortable normal_column">%.2f</td>
</tr>
''' % (usage_list[0].vcpus,
sizeformat.diskgbformat(usage_list[0].local_gb),
sizeformat.mb_float_format(usage_list[0].memory_mb),
usage_list[0].vcpu_hours,
usage_list[0].disk_gb_hours,
usage_list[0].memory_mb_hours)
)
# test for deleted project
usage_table_deleted = encoding.smart_str('''
<tr class="" data-object-id="3" id="global_usage__row__3">
<td class="sortable normal_column">3 (Deleted)</td>
<td class="sortable normal_column">%s</td>
<td class="sortable normal_column">%s</td>
<td class="sortable normal_column">%s</td>
<td class="sortable normal_column">%.2f</td>
<td class="sortable normal_column">%.2f</td>
<td class="sortable normal_column">%.2f</td>
</tr>
''' % (usage_list[1].vcpus,
sizeformat.diskgbformat(usage_list[1].local_gb),
sizeformat.mb_float_format(usage_list[1].memory_mb),
usage_list[1].vcpu_hours,
usage_list[1].disk_gb_hours,
usage_list[1].memory_mb_hours)
)
if nova_stu_enabled:
self.assertContains(res, usage_table, html=True)
if tenant_deleted:
self.assertContains(res, usage_table_deleted, html=True)
else:
self.assertNotContains(res, usage_table_deleted, html=True)
else:
self.assertNotContains(res, usage_table, html=True)
self.mock_tenant_list.assert_called_once_with(test.IsHttpRequest())
if nova_stu_enabled:
start_day, now = self._get_start_end_range(overview_days_range)
self.mock_usage_list.assert_called_once_with(
test.IsHttpRequest(),
datetime.datetime(start_day.year,
start_day.month,
start_day.day, 0, 0, 0, 0),
datetime.datetime(now.year,
now.month,
now.day, 23, 59, 59, 0))
else:
self.mock_usage_list.assert_not_called()
@override_settings(OVERVIEW_DAYS_RANGE=None)
def test_usage_csv(self):
self._test_usage_csv(nova_stu_enabled=True, overview_days_range=None)
def test_usage_csv_1_day(self):
self._test_usage_csv(nova_stu_enabled=True)
@override_settings(
OVERVIEW_DAYS_RANGE=None,
OPENSTACK_USE_SIMPLE_TENANT_USAGE=False,
)
def test_usage_csv_disabled(self):
self._test_usage_csv(nova_stu_enabled=False, overview_days_range=None)
@test.create_mocks({api.nova: ('usage_list',),
api.keystone: ('tenant_list',)})
def _test_usage_csv(self, nova_stu_enabled=True, overview_days_range=1):
self.mock_tenant_list.return_value = [self.tenants.list(), False]
usage_obj = [api.nova.NovaUsage(u) for u in self.usages.list()]
self.mock_usage_list.return_value = usage_obj
csv_url = reverse('horizon:admin:overview:index') + "?format=csv"
res = self.client.get(csv_url)
self.assertTemplateUsed(res, 'admin/overview/usage.csv')
self.assertIsInstance(res.context['usage'], usage.GlobalUsage)
hdr = '"Project Name","VCPUs","RAM (MB)","Disk (GB)","Usage (Hours)"'
self.assertContains(res, '%s\r\n' % hdr)
if nova_stu_enabled:
for obj in usage_obj:
row = '"{0}","{1}","{2}","{3}","{4:.2f}"\r\n'.format(
obj.project_name,
obj.vcpus,
obj.memory_mb,
obj.disk_gb_hours,
obj.vcpu_hours)
self.assertContains(res, row)
self.mock_tenant_list.assert_called_once_with(test.IsHttpRequest())
if nova_stu_enabled:
start_day, now = self._get_start_end_range(overview_days_range)
self.mock_usage_list.assert_called_once_with(
test.IsHttpRequest(),
datetime.datetime(start_day.year,
start_day.month,
start_day.day,
0, 0, 0, 0),
datetime.datetime(now.year,
now.month,
now.day, 23, 59, 59, 0))
else:
self.mock_usage_list.assert_not_called()
| openstack/horizon | openstack_dashboard/dashboards/admin/overview/tests.py | Python | apache-2.0 | 8,099 |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import ast
import glob
import itertools
import mmap
import os
import unittest
from typing import List
from parameterized import parameterized
ROOT_FOLDER = os.path.realpath(
os.path.join(os.path.dirname(os.path.realpath(__file__)), os.pardir, os.pardir)
)
class TestProjectStructure(unittest.TestCase):
def test_reference_to_providers_from_core(self):
for filename in glob.glob(f"{ROOT_FOLDER}/example_dags/**/*.py", recursive=True):
self.assert_file_not_contains(filename, "providers")
def test_deprecated_packages(self):
path_pattern = f"{ROOT_FOLDER}/airflow/contrib/**/*.py"
for filename in glob.glob(path_pattern, recursive=True):
if filename.endswith("/__init__.py"):
self.assert_file_contains(filename, "This package is deprecated.")
else:
self.assert_file_contains(filename, "This module is deprecated.")
def assert_file_not_contains(self, filename: str, pattern: str):
with open(filename, 'rb', 0) as file, mmap.mmap(file.fileno(), 0, access=mmap.ACCESS_READ) as content:
if content.find(bytes(pattern, 'utf-8')) != -1:
self.fail(f"File {filename} not contains pattern - {pattern}")
def assert_file_contains(self, filename: str, pattern: str):
with open(filename, 'rb', 0) as file, mmap.mmap(file.fileno(), 0, access=mmap.ACCESS_READ) as content:
if content.find(bytes(pattern, 'utf-8')) == -1:
self.fail(f"File {filename} contains illegal pattern - {pattern}")
def test_providers_modules_should_have_tests(self):
"""
        Assert every module in /airflow/providers has a corresponding test_ file in tests/providers.
"""
        # Deprecated modules that don't have a corresponding test
expected_missing_providers_modules = {
(
'airflow/providers/amazon/aws/hooks/aws_dynamodb.py',
'tests/providers/amazon/aws/hooks/test_aws_dynamodb.py',
)
}
# TODO: Should we extend this test to cover other directories?
modules_files = glob.glob(f"{ROOT_FOLDER}/airflow/providers/**/*.py", recursive=True)
# Make path relative
modules_files = (os.path.relpath(f, ROOT_FOLDER) for f in modules_files)
# Exclude example_dags
modules_files = (f for f in modules_files if "/example_dags/" not in f)
# Exclude __init__.py
modules_files = (f for f in modules_files if not f.endswith("__init__.py"))
# Change airflow/ to tests/
expected_test_files = (
f'tests/{f.partition("/")[2]}' for f in modules_files if not f.endswith("__init__.py")
)
# Add test_ prefix to filename
expected_test_files = (
f'{f.rpartition("/")[0]}/test_{f.rpartition("/")[2]}'
for f in expected_test_files
if not f.endswith("__init__.py")
)
current_test_files = glob.glob(f"{ROOT_FOLDER}/tests/providers/**/*.py", recursive=True)
# Make path relative
current_test_files = (os.path.relpath(f, ROOT_FOLDER) for f in current_test_files)
# Exclude __init__.py
current_test_files = (f for f in current_test_files if not f.endswith("__init__.py"))
modules_files = set(modules_files)
expected_test_files = set(expected_test_files)
current_test_files = set(current_test_files)
missing_tests_files = expected_test_files - expected_test_files.intersection(current_test_files)
with self.subTest("Detect missing tests in providers module"):
expected_missing_test_modules = {pair[1] for pair in expected_missing_providers_modules}
missing_tests_files = missing_tests_files - set(expected_missing_test_modules)
assert set() == missing_tests_files
with self.subTest("Verify removed deprecated module also removed from deprecated list"):
expected_missing_modules = {pair[0] for pair in expected_missing_providers_modules}
removed_deprecated_module = expected_missing_modules - modules_files
if removed_deprecated_module:
self.fail(
"You've removed a deprecated module:\n"
f"{removed_deprecated_module}"
"\n"
"Thank you very much.\n"
"Can you remove it from the list of expected missing modules tests, please?"
)
def get_imports_from_file(filepath: str):
with open(filepath) as py_file:
content = py_file.read()
doc_node = ast.parse(content, filepath)
import_names: List[str] = []
for current_node in ast.walk(doc_node):
if not isinstance(current_node, (ast.Import, ast.ImportFrom)):
continue
for alias in current_node.names:
name = alias.name
fullname = f'{current_node.module}.{name}' if isinstance(current_node, ast.ImportFrom) else name
import_names.append(fullname)
return import_names
def filepath_to_module(filepath: str):
filepath = os.path.relpath(os.path.abspath(filepath), ROOT_FOLDER)
return filepath.replace("/", ".")[: -(len('.py'))]
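# Illustrative example (hypothetical path): filepath_to_module(
#     f"{ROOT_FOLDER}/airflow/providers/google/cloud/operators/gcs.py")
# returns "airflow.providers.google.cloud.operators.gcs".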
def get_classes_from_file(filepath: str):
with open(filepath) as py_file:
content = py_file.read()
doc_node = ast.parse(content, filepath)
module = filepath_to_module(filepath)
results: List[str] = []
for current_node in ast.walk(doc_node):
if not isinstance(current_node, ast.ClassDef):
continue
name = current_node.name
        if not name.endswith("Operator") and not name.endswith("Sensor"):
continue
results.append(f"{module}.{name}")
return results
class TestGoogleProviderProjectStructure(unittest.TestCase):
MISSING_EXAMPLE_DAGS = {
('cloud', 'adls_to_gcs'),
('cloud', 'sql_to_gcs'),
('cloud', 'bigquery_to_mysql'),
('cloud', 'cassandra_to_gcs'),
('cloud', 'mssql_to_gcs'),
('suite', 'drive'),
('ads', 'ads_to_gcs'),
}
# Those operators are deprecated and we do not need examples for them
DEPRECATED_OPERATORS = {
'airflow.providers.google.cloud.operators.cloud_storage_transfer_service'
'.CloudDataTransferServiceS3ToGCSOperator',
'airflow.providers.google.cloud.operators.cloud_storage_transfer_service'
'.CloudDataTransferServiceGCSToGCSOperator',
'airflow.providers.google.cloud.sensors.gcs.GCSObjectsWtihPrefixExistenceSensor',
'airflow.providers.google.cloud.operators.dataproc.DataprocSubmitHadoopJobOperator',
'airflow.providers.google.cloud.operators.dataproc.DataprocScaleClusterOperator',
'airflow.providers.google.cloud.operators.dataproc.DataprocSubmitSparkJobOperator',
'airflow.providers.google.cloud.operators.dataproc.DataprocSubmitSparkSqlJobOperator',
'airflow.providers.google.cloud.operators.dataproc.DataprocSubmitHiveJobOperator',
'airflow.providers.google.cloud.operators.dataproc.DataprocSubmitPigJobOperator',
'airflow.providers.google.cloud.operators.dataproc.DataprocSubmitPySparkJobOperator',
'airflow.providers.google.cloud.operators.mlengine.MLEngineManageModelOperator',
'airflow.providers.google.cloud.operators.mlengine.MLEngineManageVersionOperator',
'airflow.providers.google.cloud.operators.dataflow.DataflowCreateJavaJobOperator',
'airflow.providers.google.cloud.operators.bigquery.BigQueryPatchDatasetOperator',
'airflow.providers.google.cloud.operators.dataflow.DataflowCreatePythonJobOperator',
'airflow.providers.google.cloud.operators.bigquery.BigQueryExecuteQueryOperator',
}
# Those operators should not have examples as they are never used standalone (they are abstract)
BASE_OPERATORS = {
'airflow.providers.google.cloud.operators.compute.ComputeEngineBaseOperator',
'airflow.providers.google.cloud.operators.cloud_sql.CloudSQLBaseOperator',
'airflow.providers.google.cloud.operators.dataproc.DataprocJobBaseOperator',
}
    # Please add the examples to those operators at the earliest convenience :)
MISSING_EXAMPLES_FOR_OPERATORS = {
'airflow.providers.google.cloud.operators.dataproc.DataprocInstantiateInlineWorkflowTemplateOperator',
'airflow.providers.google.cloud.operators.mlengine.MLEngineTrainingCancelJobOperator',
'airflow.providers.google.cloud.operators.dlp.CloudDLPGetStoredInfoTypeOperator',
'airflow.providers.google.cloud.operators.dlp.CloudDLPReidentifyContentOperator',
'airflow.providers.google.cloud.operators.dlp.CloudDLPCreateDeidentifyTemplateOperator',
'airflow.providers.google.cloud.operators.dlp.CloudDLPCreateDLPJobOperator',
'airflow.providers.google.cloud.operators.dlp.CloudDLPUpdateDeidentifyTemplateOperator',
'airflow.providers.google.cloud.operators.dlp.CloudDLPGetDLPJobTriggerOperator',
'airflow.providers.google.cloud.operators.dlp.CloudDLPListDeidentifyTemplatesOperator',
'airflow.providers.google.cloud.operators.dlp.CloudDLPGetDeidentifyTemplateOperator',
'airflow.providers.google.cloud.operators.dlp.CloudDLPListInspectTemplatesOperator',
'airflow.providers.google.cloud.operators.dlp.CloudDLPListStoredInfoTypesOperator',
'airflow.providers.google.cloud.operators.dlp.CloudDLPUpdateInspectTemplateOperator',
'airflow.providers.google.cloud.operators.dlp.CloudDLPDeleteDLPJobOperator',
'airflow.providers.google.cloud.operators.dlp.CloudDLPListJobTriggersOperator',
'airflow.providers.google.cloud.operators.dlp.CloudDLPCancelDLPJobOperator',
'airflow.providers.google.cloud.operators.dlp.CloudDLPGetDLPJobOperator',
'airflow.providers.google.cloud.operators.dlp.CloudDLPGetInspectTemplateOperator',
'airflow.providers.google.cloud.operators.dlp.CloudDLPListInfoTypesOperator',
'airflow.providers.google.cloud.operators.dlp.CloudDLPDeleteDeidentifyTemplateOperator',
'airflow.providers.google.cloud.operators.dlp.CloudDLPListDLPJobsOperator',
'airflow.providers.google.cloud.operators.dlp.CloudDLPRedactImageOperator',
'airflow.providers.google.cloud.operators.datastore.CloudDatastoreDeleteOperationOperator',
'airflow.providers.google.cloud.operators.datastore.CloudDatastoreGetOperationOperator',
'airflow.providers.google.cloud.sensors.gcs.GCSObjectUpdateSensor',
'airflow.providers.google.cloud.sensors.gcs.GCSUploadSessionCompleteSensor',
}
def test_example_dags(self):
operators_modules = itertools.chain(
*(self.find_resource_files(resource_type=d) for d in ["operators", "sensors", "transfers"])
)
example_dags_files = self.find_resource_files(resource_type="example_dags")
# Generate tuple of department and service e.g. ('marketing_platform', 'display_video')
operator_sets = [(f.split("/")[-3], f.split("/")[-1].rsplit(".")[0]) for f in operators_modules]
example_sets = [
(f.split("/")[-3], f.split("/")[-1].rsplit(".")[0].replace("example_", "", 1))
for f in example_dags_files
]
def has_example_dag(operator_set):
for e in example_sets:
if e[0] != operator_set[0]:
continue
if e[1].startswith(operator_set[1]):
return True
return False
with self.subTest("Detect missing example dags"):
missing_example = {s for s in operator_sets if not has_example_dag(s)}
missing_example -= self.MISSING_EXAMPLE_DAGS
assert set() == missing_example
with self.subTest("Keep update missing example dags list"):
new_example_dag = set(example_sets).intersection(set(self.MISSING_EXAMPLE_DAGS))
if new_example_dag:
new_example_dag_text = '\n'.join(str(f) for f in new_example_dag)
                self.fail(
                    "You've added an example dag currently listed as missing:\n"
                    f"{new_example_dag_text}"
                    "\n"
                    "Thank you very much.\n"
                    "Can you remove it from the list of missing examples, please?"
                )
with self.subTest("Remove extra elements"):
extra_example_dags = set(self.MISSING_EXAMPLE_DAGS) - set(operator_sets)
if extra_example_dags:
new_example_dag_text = '\n'.join(str(f) for f in extra_example_dags)
                self.fail(
                    "The list of missing example dags contains entries that no longer "
                    "match any operator module:\n"
                    f"{new_example_dag_text}"
                    "\n"
                    "Thank you very much.\n"
                    "Can you remove them from the list of missing examples, please?"
                )
def test_missing_example_for_operator(self):
missing_operators = []
for resource_type in ["operators", "sensors", "transfers"]:
operator_files = set(
self.find_resource_files(top_level_directory="airflow", resource_type=resource_type)
)
for filepath in operator_files:
service_name = os.path.basename(filepath)[: -(len(".py"))]
example_dags = list(
glob.glob(
f"{ROOT_FOLDER}/airflow/providers/google/*/example_dags/example_{service_name}*.py"
)
)
if not example_dags:
# Ignore. We have separate tests that detect this.
continue
example_paths = {
path for example_dag in example_dags for path in get_imports_from_file(example_dag)
}
example_paths = {
path for path in example_paths if f'.{resource_type}.{service_name}.' in path
}
print("example_paths=", example_paths)
operators_paths = set(get_classes_from_file(f"{ROOT_FOLDER}/{filepath}"))
missing_operators.extend(operators_paths - example_paths)
full_set = set()
full_set.update(self.MISSING_EXAMPLES_FOR_OPERATORS)
full_set.update(self.DEPRECATED_OPERATORS)
full_set.update(self.BASE_OPERATORS)
assert set(missing_operators) == full_set
@parameterized.expand(
itertools.product(["_system.py", "_system_helper.py"], ["operators", "sensors", "transfers"])
)
def test_detect_invalid_system_tests(self, resource_type, filename_suffix):
operators_tests = self.find_resource_files(top_level_directory="tests", resource_type=resource_type)
operators_files = self.find_resource_files(top_level_directory="airflow", resource_type=resource_type)
files = {f for f in operators_tests if f.endswith(filename_suffix)}
expected_files = (f"tests/{f[8:]}" for f in operators_files)
expected_files = (f.replace(".py", filename_suffix).replace("/test_", "/") for f in expected_files)
expected_files = {f'{f.rpartition("/")[0]}/test_{f.rpartition("/")[2]}' for f in expected_files}
assert set() == files - expected_files
@staticmethod
def find_resource_files(
top_level_directory: str = "airflow",
department: str = "*",
resource_type: str = "*",
service: str = "*",
):
python_files = glob.glob(
f"{ROOT_FOLDER}/{top_level_directory}/providers/google/{department}/{resource_type}/{service}.py"
)
# Make path relative
resource_files = (os.path.relpath(f, ROOT_FOLDER) for f in python_files)
# Exclude __init__.py and pycache
resource_files = (f for f in resource_files if not f.endswith("__init__.py"))
return resource_files
class TestOperatorsHooks(unittest.TestCase):
def test_no_illegal_suffixes(self):
illegal_suffixes = ["_operator.py", "_hook.py", "_sensor.py"]
files = itertools.chain(
*(
glob.glob(f"{ROOT_FOLDER}/{part}/providers/**/{resource_type}/*.py", recursive=True)
for resource_type in ["operators", "hooks", "sensors", "example_dags"]
for part in ["airflow", "tests"]
)
)
invalid_files = [f for f in files if any(f.endswith(suffix) for suffix in illegal_suffixes)]
assert [] == invalid_files
| apache/incubator-airflow | tests/always/test_project_structure.py | Python | apache-2.0 | 17,394 |
#!/bin/python3
def aVeryBigSum(n, ar):
    # n is the declared element count; sum() works on the list directly, so n is unused.
    return sum(ar)
n = int(input().strip())
ar = list(map(int, input().strip().split(' ')))
result = aVeryBigSum(n, ar)
print(result)
| sazzadBuet08/programming-contest | hackar_rank/infolytx_mock_hackar_rank/ABigSum.py | Python | apache-2.0 | 176 |
#!/usr/bin/python2
#
# Copyright 2012 Abid Hasan Mujtaba
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#
# Author: Abid H. Mujtaba
# Email: [email protected]
#
# Start Date: Aug. 9, 2012
# Last Revised: Sep. 24, 2012
#
#
# This script reads a configuration file and uses the information stored therein to connect to a variety of IMAP servers and display header information about the emails in various folders (INBOX by default). It can also delete selected emails. The advantage is that only minimal information (certain header fields) needs to be downloaded, rather than entire emails, and one can choose to delete unnecessary emails judging by the sender and/or subject alone.
# Enable Python 3.x style print function:
from __future__ import print_function
import re
# Create global variables that implement global settings which are used by the following functions.
maxThreads = 5 # This value will be over-written by the global default and possibly a command-line argument
colorTitle = None
colorFlag = None
colorFrom = None
colorDate = None
colorSubjectSeen = None
colorSubjectUnseen = None
showFlags = None
def setOptions( configFile, configSpecFile ) :
'''
    This function reads in the options from the configuration file and validates them using the configuration specification file passed to it. It creates a dictionary of options for each account which is used by the pollAccount() function to carry out its tasks. Additionally this function reads the 'global' section in the configuration file and creates the globalSettings dictionary that contains the global settings for the program.
'''
from configobj import ConfigObj, ConfigObjError, flatten_errors
from validate import Validator
# Note the following code segment concerned with using ConfigObj and validating the entries has been inspired and in part copied from http://www.voidspace.org.uk/python/articles/configobj.shtml (an excellent tutorial on using ConfigObj by its author(s))
try:
config = ConfigObj( configFile, configspec = configSpecFile, file_error = True )
except (ConfigObjError, IOError), e:
print( 'Could not read "%s": %s' % (configFile, e) )
validator = Validator()
results = config.validate( validator )
if results != True : # Validation failed. Inform user of offending entries.
for (section_list, key, _) in flatten_errors( config, results ) :
if key is not None :
print( 'The "%s" key in the section "%s" failed validation' % (key, ','.join( section_list ) ) )
else :
print( 'The following section was missing: %s' % ','.join( section_list ) )
import sys
sys.exit(1)
    # Validation successful so we move on to creating the 'servers' dictionary. We are implementing a default account paradigm which is not natively supported by ConfigObj. We want the ConfigParser ability where any option not provided in a subsection but contained in the 'DEFAULT' subsection is copied into it. To achieve this we will need to know which entries are missing in each subsection without having them filled in using the default values from the config.spec file. To that end we read in the config file without reading the spec file (hence no spec defaults are read in).
configNoSpec = ConfigObj( configFile ) # Note since config passed validation we automatically know that configNoSpec is also valid.
# The first step is to copy out the default account section dictionary and use it as the basic dictionary for all accounts. We will over-write the options that are provided in each account sub-section as we read them.
listDefaultOptions = configNoSpec[ 'accounts' ][ 'DEFAULT' ].keys() # List of Default options as EXPLICITLY provided in the configuration file (hence the use of configNoSpec as compared to just config)
listAccounts = [ x for x in config[ 'accounts' ].keys() if x != 'DEFAULT' ] # List of Accounts that does NOT contain 'DEFAULT'. We are basically carrying out list subtraction here: completely removing certain elements from the list by using list comprehension along with a predicate
    # Note: Everywhere a value needs to be read in we must use 'config' and NOT 'configNoSpec', since 'config', by virtue of knowing the required type of each option, reads in the values as the correct type rather than as a string, which is what we want.
servers = {} # Empty dictionary which we will populate with account configuration information
for account in listAccounts :
servers[ account ] = {} # Create sub-dictionary for account
        servers[ account ][ 'name' ] = account  # Saving account name for identification and later use when the sub-dictionary is passed to pollAccount
for key, value in config[ 'accounts' ][ account ].items() :
servers[ account ][ key ] = value # Copy configuration information
# So far we have stored in the dictionary (for this account) the values specified explicitly and the global defaults from config.spec that are automatically loaded for missing options. Now we must over-write with the options that are not explicitly given but ARE explicitly defined in the 'DEFAULT' section since they carry precedence over the global defaults defined in the config.spec file (which should not ideally be edited by the user but rather represents the creator's fall-back default values in case an option is completely deleted by the user in the config file)
# Now we create a list of the options that are explicitly in DEFAULT but NOT in the specific account (Note the use of configNoSpec rather than config) :
listMissingDefaults = [ x for x in listDefaultOptions if x not in configNoSpec[ 'accounts' ][ account ].keys() ]
for key in listMissingDefaults :
servers[ account ][ key ] = config[ 'accounts' ][ 'DEFAULT' ][ key ]
# Now we read in the global settings:
globalSettings = {} # Create empty dictionary to populate
for key in config[ 'global' ].keys() :
globalSettings[ key ] = config[ 'global' ][ key ]
return servers, globalSettings
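# Illustrative usage (a sketch only; the file names below are examples and the
# 'host' option name is an assumption, not something defined by this script):
#
# servers, globalSettings = setOptions( 'fetchheaders.conf', 'fetchheaders.conf.spec' )
# for account in servers :
#     print( account, servers[ account ][ 'host' ] )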
def argParse() :
'''
This function reads in the arguments passed to the program, validates them and if validated returns a parser.parse_args() returned object which contains the various arguments passed and which can then be used by the program as it sees fit.
'''
import argparse # This module gives powerful argument parsing abilities along with auto-generation of --help output.
# Specify the various arguments that the program expects and validate them. Additional arguments can be added as required.
parser = argparse.ArgumentParser( description = "A python script which simultaneously polls multiple IMAP accounts to display the subjects of all or only unseen messages in the specified folder (INBOX by default) without downloading complete messages.\n For further details please read the man page." )
parser.add_argument( "-c", "--config", help = "Specify the name and path to the configuration file. If not specified the program will use the default configuration file in $HOME/.fetchheaders/fetchheaders.conf. Note: The configuration specification file (fetchheaders.conf.spec) should not be altered casually and the program will only look for it in $HOME/.fetchheaders/" )
# For --accounts and --exclude which we wish to be mutually exclusive optional arguments we create a mutually exclusive group within the parser to hold them.
group = parser.add_mutually_exclusive_group()
group.add_argument( "-a", "--accounts", help = "Specify the names of IMAP accounts to be polled as a comma-separated list. e.g. -a Gmail,Hotmail. Only accounts specified in the configuration file are allowed." )
group.add_argument( "-x", "--exclude", help = "Specify the names of the IMAP accounts which are NOT to be polled, as a comma-separated list. e.g. -x Gmail,Hotmail. Only accounts specified in the configuration file are allowed to be excluded." )
parser.add_argument( "-n", "--numsonly", help = "Flag: Only show the number of unseen and total number of messages for the specified folder for each account.", action = "store_true" )
parser.add_argument( "--noColor", help = "Flag: Do NOT allow colored output. Useful for shells that don't allow colored text or when the output needs to piped to another application since colored text is implemented by encapsulating the text in xterm color escape codes.", action = "store_true" )
parser.add_argument( "--oldestFirst", help = "Flag: Show oldest email first i.e. chronological order.", action = "store_true" )
parser.add_argument( "-A", "--showAll", help = "Flag: Show all emails in specified folder, not just unseen ones.", action = "store_true" )
parser.add_argument( "--showFlags", help = "Flag: Show mutt-style flags (in square brackets) to indicate new/unseen and deleted emails when ALL emails are displayed (i.e. -A is issued).", action = "store_true" )
parser.add_argument( "-t", "--threads", help = "Specify the maximum number of parallel threads the program will use to simultaneously access IMAP servers. Set to 1 for serial (non-parallel) behaviour.", type = int)
parser.add_argument( "-T", "--terminal", help = "Flag: Show results in the terminal. Do NOT use urwid.", action = "store_true" )
# Begin reading in arguments and validate them:
    args = parser.parse_args() # args contains the values of arguments passed. If incorrect arguments are passed the program will be stopped here and argparse will display the appropriate error and help message.
return args
def applyArgs( args, servers, globalSettings ) :
'''
    This function accepts both the arguments read by the script and the 'servers' object (dictionary) created by setOptions(). It applies the command-line arguments to the 'servers' and 'globalSettings' objects and returns modified versions reflecting these changes.
'''
# This function is where we carry out all operations necessary to implement the settings specified by command-line arguments.
    # -a, --accounts. Limit accounts to the specified ones:
if args.accounts : # True if -a or --accounts has been specified
# We must perform some error checking on the arguments passed to the --accounts optional argument
newServers = {} # Create a new dictionary we will populate ONLY with the specified accounts
for item in args.accounts.split( ',' ) : # We are expecting a comma-separated list
            # We create a list of servers whose (lowercased) names START with the item in the argument list currently under consideration
matching_servers = [x for x in servers.keys() if re.match('^' + item.lower(), x.lower())]
if matching_servers: # A match has occurred
for server in matching_servers: # All matching servers are added to the list displayed
newServers[ server ] = servers[ server ]
else: # No match has occurred. This is an error.
print( '\nError: ' + item + ' is not the beginning of a valid IMAP account name specified in the configuration file.' )
import sys
sys.exit(1)
servers = newServers
# -x, --exclude. Does NOT poll the accounts specified with this argument:
if args.exclude : # True if -x or --exclude has been specified
# We must perform some error checking on the arguments passed to the --exclude optional argument
excludedAccounts = [] # Empty list which we will populate with the excluded accounts
newServers = {} # Empty dictionary with which we will construct the new 'servers' dictionary without the excluded accounts
for item in args.exclude.split( ',' ) : # We are expecting a comma-separated list
if not item in servers.keys() : # If this item in the comma-separated list is NOT an account specified in the configuration file
                print( '\nError: ' + item + ' is not a valid IMAP account name specified in the configuration file.' )
import sys
sys.exit(1)
else :
excludedAccounts.append( item )
# Now we remove the excluded accounts when we create the new 'servers' dictionary:
for account in servers.keys() :
if not account in excludedAccounts : # The current account is not in the excluded list and so can be added to the servers dictionary:
newServers[ account ] = servers[ account ]
        # Place the newly constructed dictionary (with accounts excluded) into the original 'servers' dictionary:
servers = newServers
# -n, --numsonly. If specified only the total and unseen number of messages is to be displayed. Similar to 'fetchmail -c'.
if args.numsonly :
for account in servers.keys() :
servers[ account ][ 'showOnlyNums' ] = True
# -T, --terminal. If specified the output is displayed on the terminal (stdout) and 'urwid' is NOT used.
if args.terminal:
globalSettings[ 'terminal' ] = True
else : globalSettings[ 'terminal' ] = False
    # --noColor. If specified the output of the program should NOT be colored.
if args.noColor :
globalSettings[ 'color' ] = False
# -A, --showAll. Show all emails not just unseen ones.
if args.showAll :
for account in servers.keys() :
servers[ account ][ 'showUnseen' ] = False
globalSettings[ 'showFlags' ] = True # Flags are shown by default whenever ALL emails are viewed whether --showFlags is passed or not.
# --oldestFirst. Show oldest email first i.e. in chronological order.
if args.oldestFirst :
for account in servers.keys() :
servers[ account ][ 'latestEmailFirst' ] = False
# --showFlags. Show mutt-style flags (in square brackets) when all emails are being displayed.
if args.showFlags :
globalSettings[ 'showFlags' ] = True
# -t, --threads. Set max. number of parallel threads.
if args.threads :
globalSettings[ 'maxThreads' ] = args.threads
return servers, globalSettings
def applyGlobalSettings( globalSettings ) :
'''
This function applies the global settings defined in the dictionary 'globalSettings' (created using the configuration file and command-line arguments).
'''
# Apply maxThreads setting:
global maxThreads
maxThreads = globalSettings[ 'maxThreads' ]
# Apply showFlags settings:
global showFlags
showFlags = globalSettings[ 'showFlags' ]
# Apply color settings:
if globalSettings[ 'color' ] : # output is to be colored
global colorTitle, colorFlag, colorDate, colorFrom, colorSubjectSeen, colorSubjectUnseen # Accessing global text color variables
colorTitle = globalSettings[ 'colorTitle' ]
colorFlag = globalSettings[ 'colorFlag' ]
colorSubjectSeen = globalSettings[ 'colorSubjectSeen' ]
colorSubjectUnseen = globalSettings[ 'colorSubjectUnseen' ]
colorDate = globalSettings[ 'colorDate' ]
colorFrom = globalSettings[ 'colorFrom' ]
def display( out ) :
'''
Accepts an Output data structure and prints out the results to the screen.
    Note: This function carries out all formatting for the output using the purely data-oriented Output object as input. The output is in a text format which can be piped forward to other programs.
'''
from miscClasses import colorWidth as cW # Custom function that sets width of text fields and colors it.
print( cW( out.settings[ 'name' ] + ':', 12, colorTitle ), end = '' ) # Print name of account and allow for further text
if out.settings[ 'showNums' ] :
print( "( total: %d | unseen: %d )" % (out.numAll, out.numUnseen) )
print( '\n' )
# Preamble printed. Now start printing individual email information
if out.settings[ 'showUnseen' ] : # Show only unseen messages
for ii in range( len( out.emails ) ) :
email = out.emails[ ii ]
print( cW( str(ii + 1), out.numDigits, align = '>' ) + '. ' + cW( email.Date, 17, colorDate ) + ' ' + cW( email.From, 30, colorFrom ) + ' ' + cW( email.Subject, 120, colorSubjectUnseen, fill = False ) )
else : # Show ALL messages. Different formatting scheme.
if showFlags : # Global setting which declares that the flags associated with each message must be displayed
flags = lambda x : ' [ ' + cW( x, 2, colorFlag ) + '] '
else :
flags = lambda x : '. '
for ii in range( len( out.emails ) ) :
email = out.emails[ ii ]
if email.Seen : # Email has a Seen flag.
flag = ' '
colorSubject = colorSubjectSeen
else :
flag = 'N'
colorSubject = colorSubjectUnseen
print( cW( str(ii + 1), out.numDigits, align = '>' ) + flags( flag ) + cW( email.Date, 17, colorDate ) + ' ' + cW( email.From, 30, colorFrom ) + ' ' + cW( email.Subject, 120, colorSubject ) )
def main() :
'''
Main function that starts the execution of all of the code.
'''
args = argParse()
# Specify default locations for configuration and specification files:
import os
homeFolder = os.getenv( "HOME" ) # Basically the value in $HOME
packageFolder = '/usr/local/share/fetchheaders' # Location of folder containing all package files
# packageFolder = '.'
fileConf = homeFolder + '/.fetchheaders.conf'
fileSpec = packageFolder + '/fetchheaders.conf.spec' # Path to config specification file
# Check if a configuration file has been specified using the -c or --config flag.
if args.config : # A configuration file has been provided
fileConf = args.config
# Read in settings and options from configuration files :
servers, globalSettings = setOptions( fileConf, fileSpec )
# Override settings and options from command-line arguments :
servers, globalSettings = applyArgs( args, servers, globalSettings )
    # Apply Global Settings. These are applied outside of pollAccount which acts on each account independently.
applyGlobalSettings( globalSettings ) # Apply the global settings contained in the 'globalSettings' dictionary we created from the configuration file and command-line arguments
# Now we determine whether the output is intended to go to the terminal (stdout) straight or passed on to urwid
if globalSettings[ 'terminal' ]: # Do NOT use urwid
from miscClasses import threadedExec
for out in threadedExec( servers, maxThreads ):
if out.error: # If an error occurs while constructing the Output object the exception is caught and the error flag is set
from miscClasses import colorWidth as cW
print( cW( out.settings[ 'name' ] + ':', 12, colorTitle ), end = '' ) # We indicate in the output that an Error has occurred.
print( "Error!\n\n" )
else:
display(out)
else:
# Use urwid to display the results, interact with the display and possibly flag messages for deletion:
from urwidDisplay import urwidDisplay
# Create instance of the imported class to create and start the urwid loop to display emails
settings = { 'maxThreads': maxThreads, 'showFlags': showFlags }
urwidDisplay( servers, settings )
# Main execution of the program begins here:
main()
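# Illustrative usage (hypothetical shell invocations; the flags correspond to the
# options defined in argParse() above, assuming the script is installed as 'fetchheaders'):
#   fetchheaders                      # poll all configured accounts and list unseen subjects
#   fetchheaders -a Gmail -n          # poll only the Gmail account and show message counts only
#   fetchheaders -A --showFlags -T    # show all emails with mutt-style flags, plain terminal output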
| abid-mujtaba/fetchheaders | fetchheaders.py | Python | apache-2.0 | 20,367 |
#!/usr/bin/env python
"""Tests for constructing supercell models."""
import itertools
import numpy as np
from numpy.testing import assert_allclose
import pytest
from parameters import KPT, T_VALUES
import tbmodels
def get_equivalent_k(k, supercell_size):
return itertools.product(
*[
(np.linspace(0, 1, s, endpoint=False) + ki / s)
for ki, s in zip(k, supercell_size)
]
)
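# Illustrative note (not part of the original test suite): get_equivalent_k yields,
# for a supercell k-point, the base-cell k-points whose bands fold onto it. For
# example, k = (0.5, 0, 0) with supercell_size = (2, 1, 1) yields (0.25, 0, 0) and
# (0.75, 0, 0); the tests below therefore compare the supercell eigenvalues against
# the sorted, concatenated base-model eigenvalues at these equivalent k-points.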
@pytest.mark.parametrize("t_values", T_VALUES)
@pytest.mark.parametrize("supercell_size", [(1, 1, 1), (2, 1, 1), (2, 3, 2)])
def test_supercell_simple(get_model, t_values, supercell_size, sparse):
"""
Test that the eigenvalues from a supercell model match the folded
eigenvalues of the base model, for a simple model.
"""
model = get_model(*t_values, sparse=sparse)
supercell_model = model.supercell(size=supercell_size)
for k in KPT:
ev_supercell = supercell_model.eigenval(k)
equivalent_k = get_equivalent_k(k, supercell_size)
ev_folded = np.sort(
np.array([model.eigenval(kval) for kval in equivalent_k]).flatten()
)
assert ev_supercell.shape == ev_folded.shape
assert_allclose(ev_supercell, ev_folded, atol=1e-7)
@pytest.mark.parametrize("t_values", T_VALUES)
@pytest.mark.parametrize("supercell_size", [(5, 4), (1, 1), (2, 3)])
def test_supercell_simple_2d(get_model, t_values, supercell_size):
"""
Test that the eigenvalues from a supercell model match the folded
eigenvalues of the base model, for a simple model.
"""
model = get_model(*t_values, dim=2)
supercell_model = model.supercell(size=supercell_size)
for k in [(-0.12341, 0.92435), (0, 0), (0.65432, -0.1561)]:
ev_supercell = supercell_model.eigenval(k)
equivalent_k = get_equivalent_k(k, supercell_size)
ev_folded = np.sort(
np.array([model.eigenval(kval) for kval in equivalent_k]).flatten()
)
assert ev_supercell.shape == ev_folded.shape
assert_allclose(ev_supercell, ev_folded, atol=1e-7)
@pytest.mark.parametrize("t_values", T_VALUES)
@pytest.mark.parametrize("supercell_size", [(5, 4, 2, 2), (1, 1, 1, 1), (2, 2, 3, 2)])
def test_supercell_simple_4d(get_model, t_values, supercell_size):
"""
Test that the eigenvalues from a supercell model match the folded
eigenvalues of the base model, for a simple model.
"""
model = get_model(*t_values, dim=4)
supercell_model = model.supercell(size=supercell_size)
for k in [
(-0.12341, 0.92435, 0.32, 0.1212),
(0, 0, 0, 0),
(0.65432, -0.1561, 0.2352346, -0.92345),
]:
ev_supercell = supercell_model.eigenval(k)
equivalent_k = get_equivalent_k(k, supercell_size)
ev_folded = np.sort(
np.array([model.eigenval(kval) for kval in equivalent_k]).flatten()
)
assert ev_supercell.shape == ev_folded.shape
assert_allclose(ev_supercell, ev_folded, atol=1e-7)
@pytest.mark.parametrize("supercell_size", [(1, 1, 1), (2, 1, 1)])
def test_supercell_inas(sample, supercell_size):
"""
Test that the eigenvalues from a supercell model match the folded
eigenvalues of the base model, for the realistic InAs model.
"""
model = tbmodels.io.load(sample("InAs_nosym.hdf5"))
supercell_model = model.supercell(size=supercell_size)
for k in [(-0.4, 0.1, 0.45), (0, 0, 0), (0.41126, -0.153112, 0.2534)]:
ev_supercell = supercell_model.eigenval(k)
equivalent_k = get_equivalent_k(k, supercell_size)
ev_folded = np.sort(
np.array([model.eigenval(kval) for kval in equivalent_k]).flatten()
)
assert ev_supercell.shape == ev_folded.shape
assert_allclose(ev_supercell, ev_folded, atol=1e-7)
def test_supercell_model_equal(sample, models_close):
"""
Regression test checking that a supercell model matches a stored
reference.
"""
model = tbmodels.io.load(sample("InAs_nosym.hdf5"))
supercell_model = model.supercell(size=(1, 2, 3))
supercell_reference = tbmodels.io.load(sample("InAs_supercell_reference.hdf5"))
models_close(supercell_model, supercell_reference, ignore_sparsity=True)
| Z2PackDev/TBmodels | tests/test_supercell.py | Python | apache-2.0 | 4,214 |
# -*- coding:utf-8 -*-
from .inception import InceptionDao
from .models import DataMaskingRules, DataMaskingColumns
from simplejson import JSONDecodeError
import simplejson as json
import re
inceptionDao = InceptionDao()
class Masking(object):
    # Mask sensitive data in query results
def data_masking(self, cluster_name, db_name, sql, sql_result):
result = {'status': 0, 'msg': 'ok', 'data': []}
        # Obtain the syntax tree via inception and parse it
try:
print_info = self.query_tree(sql, cluster_name, db_name)
except Exception as msg:
result['status'] = 1
result['msg'] = str(msg)
return result
if print_info is None:
result['status'] = 1
result['msg'] = 'inception返回的结果集为空!可能是SQL语句有语法错误,无法完成脱敏校验,如果需要继续查询请关闭校验'
elif print_info['errlevel'] != 0:
result['status'] = 2
result['msg'] = 'inception返回异常,无法完成脱敏校验,如果需要继续查询请关闭校验:\n' + print_info['errmsg']
else:
query_tree = print_info['query_tree']
            # Get the column data that hit the masking rules
try:
table_hit_columns, hit_columns = self.analy_query_tree(query_tree, cluster_name)
except Exception as msg:
result['status'] = 2
result['msg'] = '解析inception语法树获取表信息出错,无法完成脱敏校验,如果需要继续查询请关闭校验:{}\nquery_tree:{}'.format(str(msg),
print_info)
return result
            # For queries containing select *, iterate over column_list to get the index of each hit column and add it to hit_columns
if table_hit_columns and sql_result.get('rows'):
column_list = sql_result['column_list']
table_hit_column = {}
for column_info in table_hit_columns:
table_hit_column_info = {}
rule_type = column_info['rule_type']
table_hit_column_info[column_info['column_name']] = rule_type
table_hit_column.update(table_hit_column_info)
for index, item in enumerate(column_list):
if item in table_hit_column.keys():
column = {}
column['column_name'] = item
column['index'] = index
column['rule_type'] = table_hit_column.get(item)
hit_columns.append(column)
            # Mask the data of the columns in hit_columns that matched the rules
            # Fetch all masking rules up front to reduce repeated queries and improve efficiency
DataMaskingRulesOb = DataMaskingRules.objects.all()
if hit_columns and sql_result.get('rows'):
rows = list(sql_result['rows'])
for column in hit_columns:
index = column['index']
for idx, item in enumerate(rows):
rows[idx] = list(item)
rows[idx][index] = self.regex(DataMaskingRulesOb, column['rule_type'], rows[idx][index])
sql_result['rows'] = rows
return result
    # Obtain the syntax tree via inception
def query_tree(self, sqlContent, cluster_name, dbName):
try:
print_info = inceptionDao.query_print(sqlContent, cluster_name, dbName)
except Exception as e:
raise Exception('通过inception获取语法树异常,请检查inception配置,并确保inception可以访问实例:' + str(e))
if print_info:
id = print_info[0][0]
statement = print_info[0][1]
            # A non-zero return value means there is an error: 1 is a warning that does not block execution, 2 is a severe error that must be fixed
errlevel = print_info[0][2]
query_tree = print_info[0][3]
errmsg = print_info[0][4]
            # Case where the SQL submitted to inception has a syntax error
if errmsg == 'Global environment':
errlevel = 2
errmsg = 'Global environment: ' + query_tree
if errlevel == 0:
pass
# print(json.dumps(json.loads(query_tree), indent=4, sort_keys=False, ensure_ascii=False))
return {'id': id, 'statement': statement, 'errlevel': errlevel, 'query_tree': query_tree,
'errmsg': errmsg}
else:
return None
    # Parse the syntax tree to get the tables involved in the statement, used to enforce query permission restrictions
def query_table_ref(self, sqlContent, cluster_name, dbName):
result = {'status': 0, 'msg': 'ok', 'data': []}
try:
print_info = self.query_tree(sqlContent, cluster_name, dbName)
except Exception as msg:
result['status'] = 1
result['msg'] = str(msg)
return result
if print_info is None:
result['status'] = 1
result['msg'] = 'inception返回的结果集为空!可能是SQL语句有语法错误,无法校验表权限,如果需要继续查询请关闭校验'
elif print_info['errlevel'] != 0:
result['status'] = 2
result['msg'] = 'inception返回异常,无法校验表权限,如果需要继续查询请关闭校验:\n' + print_info['errmsg']
else:
            try:
                table_ref = json.loads(print_info['query_tree'])['table_ref']
            except JSONDecodeError:
                try:
                    table_ref = json.loads(repair_json_str(print_info['query_tree']))['table_ref']
                except JSONDecodeError as msg:
                    result['status'] = 2
                    result['msg'] = '通过inception语法树解析表信息出错,无法校验表权限,如果需要继续查询请关闭校验:{}\nquery_tree:{}'.format(str(msg), print_info)
                    table_ref = ''
result['data'] = table_ref
return result
    # Parse query_tree to get statement information and return the column information that hits the masking rules
def analy_query_tree(self, query_tree, cluster_name):
try:
query_tree_dict = json.loads(query_tree)
except JSONDecodeError:
query_tree_dict = json.loads(repair_json_str(query_tree))
select_list = query_tree_dict.get('select_list')
table_ref = query_tree_dict.get('table_ref')
        # Fetch all masking column configurations up front to reduce repeated queries and improve efficiency
DataMaskingColumnsOb = DataMaskingColumns.objects.all()
        # Check whether any table involved in the statement has masking columns configured
is_exist = False
for table in table_ref:
if DataMaskingColumnsOb.filter(cluster_name=cluster_name,
table_schema=table['db'],
table_name=table['table'],
active=1).exists():
is_exist = True
        # If no masking columns are configured, skip rule parsing entirely
if is_exist:
            # Iterate over select_list
columns = []
            hit_columns = [] # Columns that hit a masking rule
            table_hit_columns = [] # Hit columns of the involved tables; only needed for select *
            # Check whether the statement uses syntax that masking does not support
for select_item in select_list:
if select_item['type'] not in ('FIELD_ITEM', 'aggregate'):
raise Exception('不支持该查询语句脱敏!')
if select_item['type'] == 'aggregate':
if select_item['aggregate'].get('type') not in ('FIELD_ITEM', 'INT_ITEM'):
raise Exception('不支持该查询语句脱敏!')
            # Rules for extracting select information: only select items of type FIELD_ITEM or aggregate are handled, e.g. [*],[*,column_a],[column_a,*],[column_a,a.*,column_b],[a.*,column_a,b.*],
select_index = [
select_item['field'] if select_item['type'] == 'FIELD_ITEM' else select_item['aggregate'].get('field')
for
select_item in select_list if select_item['type'] in ('FIELD_ITEM', 'aggregate')]
            # Normalize select_list into the uniform format {'type': 'FIELD_ITEM', 'db': 'archer_master', 'table': 'sql_users', 'field': 'email'}
select_list = [select_item if select_item['type'] == 'FIELD_ITEM' else select_item['aggregate'] for
select_item in select_list if select_item['type'] in ('FIELD_ITEM', 'aggregate')]
if select_index:
                # If a field='*' is found, iterate over all tables to find every column that hits a rule
if '*' in select_index:
                    # Hit columns of the involved tables
for table in table_ref:
hit_columns_info = self.hit_table(DataMaskingColumnsOb, cluster_name, table['db'],
table['table'])
table_hit_columns.extend(hit_columns_info)
                    # Several different query formats
# [*]
if re.match(r"^(\*,?)+$", ','.join(select_index)):
hit_columns = []
# [*,column_a]
elif re.match(r"^(\*,)+(\w,?)+$", ','.join(select_index)):
                        # Collect columns whose field is not *, check each against the masking rules and record the rule type and index; indices are taken from the end (negative slice)
for index, item in enumerate(select_list):
item['index'] = index - len(select_list)
if item.get('field') != '*':
columns.append(item)
# [column_a, *]
elif re.match(r"^(\w,?)+(\*,?)+$", ','.join(select_index)):
                        # Collect columns whose field is not *, check each against the masking rules and record the rule type and index; indices are taken from the front
for index, item in enumerate(select_list):
item['index'] = index
if item.get('field') != '*':
columns.append(item)
# [column_a,a.*,column_b]
elif re.match(r"^(\w,?)+(\*,?)+(\w,?)+$", ','.join(select_index)):
                        # Collect columns whose field is not * and check each against the masking rules; fields before * are indexed from the front, fields after * are indexed from the end
for index, item in enumerate(select_list):
item['index'] = index
if item.get('field') == '*':
first_idx = index
break
select_list.reverse()
for index, item in enumerate(select_list):
item['index'] = index
if item.get('field') == '*':
last_idx = len(select_list) - index - 1
break
select_list.reverse()
for index, item in enumerate(select_list):
if item.get('field') != '*' and index < first_idx:
item['index'] = index
if item.get('field') != '*' and index > last_idx:
item['index'] = index - len(select_list)
columns.append(item)
# [a.*, column_a, b.*]
else:
raise Exception('不支持select信息为[a.*, column_a, b.*]格式的查询脱敏!')
                # For queries without *, simply iterate to find hit columns; the column index in query_tree equals the column index in the query statement
else:
for index, item in enumerate(select_list):
item['index'] = index
if item.get('field') != '*':
columns.append(item)
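            # Note on the index convention used above: columns that appear before a '*'
            # keep their position counted from the start of the row (non-negative index),
            # while columns after a '*' are addressed from the end of the row (negative
            # index), so the masking step lands on the correct cell no matter how many
            # columns the '*' expands to in the result set.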
            # Format the information of the hit columns
for column in columns:
hit_info = self.hit_column(DataMaskingColumnsOb, cluster_name, column.get('db'), column.get('table'),
column.get('field'))
if hit_info['is_hit']:
hit_info['index'] = column['index']
hit_columns.append(hit_info)
else:
table_hit_columns = None
hit_columns = None
return table_hit_columns, hit_columns
    # Check whether a column hits a masking rule; if it does, return the masking rule id and rule type
def hit_column(self, DataMaskingColumnsOb, cluster_name, table_schema, table_name, column_name):
column_info = DataMaskingColumnsOb.filter(cluster_name=cluster_name, table_schema=table_schema,
table_name=table_name, column_name=column_name, active=1)
hit_column_info = {}
hit_column_info['cluster_name'] = cluster_name
hit_column_info['table_schema'] = table_schema
hit_column_info['table_name'] = table_name
hit_column_info['column_name'] = column_name
hit_column_info['rule_type'] = 0
hit_column_info['is_hit'] = False
        # Rule matched
if column_info:
hit_column_info['rule_type'] = column_info[0].rule_type
hit_column_info['is_hit'] = True
return hit_column_info
    # Get information for all columns in the table that hit masking rules
def hit_table(self, DataMaskingColumnsOb, cluster_name, table_schema, table_name):
columns_info = DataMaskingColumnsOb.filter(cluster_name=cluster_name, table_schema=table_schema,
table_name=table_name, active=1)
        # Rules matched
hit_columns_info = []
for column in columns_info:
hit_column_info = {}
hit_column_info['cluster_name'] = cluster_name
hit_column_info['table_schema'] = table_schema
hit_column_info['table_name'] = table_name
hit_column_info['is_hit'] = True
hit_column_info['column_name'] = column.column_name
hit_column_info['rule_type'] = column.rule_type
hit_columns_info.append(hit_column_info)
return hit_columns_info
    # Mask data using regular expressions
def regex(self, DataMaskingRulesOb, rule_type, value):
rules_info = DataMaskingRulesOb.get(rule_type=rule_type)
if rules_info:
rule_regex = rules_info.rule_regex
hide_group = rules_info.hide_group
            # The regex must use capture groups; the hidden group is replaced with ****
try:
p = re.compile(rule_regex)
m = p.search(str(value))
masking_str = ''
for i in range(m.lastindex):
if i == hide_group - 1:
group = '****'
else:
group = m.group(i + 1)
masking_str = masking_str + group
return masking_str
except Exception:
return value
else:
return value
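    # Illustrative example (hypothetical rule, not taken from the rule table): with
    # rule_regex = r'(\d{3})(\d{4})(\d{4})' and hide_group = 2, a value such as
    # '13812345678' is masked to '138****5678' -- the hidden group is replaced by
    # '****' while the other captured groups are kept unchanged.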
def repair_json_str(json_str):
    # Handle JSONDecodeError: Expecting property name enclosed in double quotes
    # The inception syntax tree may contain {"a":1,}, ["a":1,], {'a':1}, [, { }]
json_str = re.sub(r"{\s*'(.+)':", r'{"\1":', json_str)
json_str = re.sub(r",\s*?]", "]", json_str)
json_str = re.sub(r",\s*?}", "}", json_str)
json_str = re.sub(r"\[,\s*?{", "[{", json_str)
json_str = json_str.replace("'", "\"")
return json_str
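# Illustrative example: repair_json_str("{'a':1,}") returns '{"a":1}', which json.loads
# can parse, whereas parsing the original string would raise a JSONDecodeError.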
| jly8866/archer | sql/data_masking.py | Python | apache-2.0 | 16,236 |
import Tkinter as tk
def foo(*args):
print "foo!", args
import sys; sys.stdout.flush()
def __extend__(app):
extension = KeywordExtension(app)
app.bind_class("all", "<F5>", extension.make_keyword)
# this needs to add something to the tools menu...
class KeywordExtension(object):
def __init__(self, app):
self.app = app
pass
def make_keyword(self, event=None):
# N.B. this is the editor_page object
editor = self.app.get_current_editor()
rows = editor.get_selected_rows()
print rows
import sys; sys.stdout.flush()
# now I want to do something like:
'''
editor.delete_selected_rows()
editor.new_keyword(rows)
=> prompts user for a name (with a 'place'd dialog rather than a popup?
then creates the keyword with that name, and replaces the selected
text with a reference to that keyword
'''
self.app.status_message("an extension says hello; you have selected %s rows" % len(rows))
| boakley/robotframework-workbench | rwb/editor/extensions/keywords.py | Python | apache-2.0 | 1,061 |
"""This example demonstrates the usage of SigOpt with Ray Tune.
It also checks that it is usable with a separate scheduler.
"""
import sys
import time
from ray import tune
from ray.tune.schedulers import AsyncHyperBandScheduler
from ray.tune.suggest.sigopt import SigOptSearch
def evaluate(step, width, height):
return (0.1 + width * step / 100) ** (-1) + height * 0.01
def easy_objective(config):
# Hyperparameters
width, height = config["width"], config["height"]
for step in range(config["steps"]):
# Iterative training function - can be any arbitrary training procedure
intermediate_score = evaluate(step, width, height)
        # Feed the score back to Tune.
tune.report(iterations=step, mean_loss=intermediate_score)
time.sleep(0.1)
if __name__ == "__main__":
import argparse
import os
parser = argparse.ArgumentParser()
parser.add_argument(
"--smoke-test", action="store_true", help="Finish quickly for testing"
)
args, _ = parser.parse_known_args()
if "SIGOPT_KEY" not in os.environ:
if args.smoke_test:
print("SigOpt API Key not found. Skipping smoke test.")
sys.exit(0)
else:
raise ValueError(
"SigOpt API Key not found. Please set the SIGOPT_KEY "
"environment variable."
)
space = [
{
"name": "width",
"type": "int",
"bounds": {"min": 0, "max": 20},
},
{
"name": "height",
"type": "int",
"bounds": {"min": -100, "max": 100},
},
]
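    # Note: the parameter names above ("width", "height") must match the keys that
    # easy_objective reads from its config; SigOptSearch suggests integer values
    # within the given bounds and merges them into the trial config for each trial.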
algo = SigOptSearch(
space,
name="SigOpt Example Experiment",
max_concurrent=1,
metric="mean_loss",
mode="min",
)
scheduler = AsyncHyperBandScheduler(metric="mean_loss", mode="min")
analysis = tune.run(
easy_objective,
name="my_exp",
search_alg=algo,
scheduler=scheduler,
num_samples=4 if args.smoke_test else 100,
config={"steps": 10},
)
print(
"Best hyperparameters found were: ",
analysis.get_best_config("mean_loss", "min"),
)
| ray-project/ray | python/ray/tune/examples/sigopt_example.py | Python | apache-2.0 | 2,215 |
#!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "dannysite_web.settings")
from django.core.management import execute_from_command_line
execute_from_command_line(sys.argv)
| manyunkai/dannysite4 | src/manage.py | Python | apache-2.0 | 256 |
from hazelcast.protocol.builtin import FixSizedTypesCodec, CodecUtil
from hazelcast.serialization.bits import *
from hazelcast.protocol.client_message import END_FRAME_BUF, END_FINAL_FRAME_BUF, SIZE_OF_FRAME_LENGTH_AND_FLAGS, create_initial_buffer_custom
from hazelcast.sql import SqlColumnMetadata
from hazelcast.protocol.builtin import StringCodec
_TYPE_ENCODE_OFFSET = 2 * SIZE_OF_FRAME_LENGTH_AND_FLAGS
_TYPE_DECODE_OFFSET = 0
_NULLABLE_ENCODE_OFFSET = _TYPE_ENCODE_OFFSET + INT_SIZE_IN_BYTES
_NULLABLE_DECODE_OFFSET = _TYPE_DECODE_OFFSET + INT_SIZE_IN_BYTES
_INITIAL_FRAME_SIZE = _NULLABLE_ENCODE_OFFSET + BOOLEAN_SIZE_IN_BYTES - SIZE_OF_FRAME_LENGTH_AND_FLAGS
class SqlColumnMetadataCodec:
@staticmethod
def encode(buf, sql_column_metadata, is_final=False):
initial_frame_buf = create_initial_buffer_custom(_INITIAL_FRAME_SIZE)
FixSizedTypesCodec.encode_int(initial_frame_buf, _TYPE_ENCODE_OFFSET, sql_column_metadata.type)
FixSizedTypesCodec.encode_boolean(initial_frame_buf, _NULLABLE_ENCODE_OFFSET, sql_column_metadata.nullable)
buf.extend(initial_frame_buf)
StringCodec.encode(buf, sql_column_metadata.name)
if is_final:
buf.extend(END_FINAL_FRAME_BUF)
else:
buf.extend(END_FRAME_BUF)
@staticmethod
def decode(msg):
msg.next_frame()
initial_frame = msg.next_frame()
type = FixSizedTypesCodec.decode_int(initial_frame.buf, _TYPE_DECODE_OFFSET)
is_nullable_exists = False
nullable = False
if len(initial_frame.buf) >= _NULLABLE_DECODE_OFFSET + BOOLEAN_SIZE_IN_BYTES:
nullable = FixSizedTypesCodec.decode_boolean(initial_frame.buf, _NULLABLE_DECODE_OFFSET)
is_nullable_exists = True
name = StringCodec.decode(msg)
CodecUtil.fast_forward_to_end_frame(msg)
return SqlColumnMetadata(name, type, is_nullable_exists, nullable)
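    # Note: decode checks the length of the initial frame before reading the
    # 'nullable' field, so frames produced by older protocol versions that do not
    # carry the field can still be decoded (is_nullable_exists stays False then).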
| hazelcast/hazelcast-python-client | hazelcast/protocol/codec/custom/sql_column_metadata_codec.py | Python | apache-2.0 | 1,925 |
# -*- coding: utf-8 -*-
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from collections import OrderedDict
import functools
import re
from typing import Dict, Optional, Sequence, Tuple, Type, Union
import pkg_resources
from google.api_core.client_options import ClientOptions
from google.api_core import exceptions as core_exceptions
from google.api_core import gapic_v1
from google.api_core import retry as retries
from google.auth import credentials as ga_credentials # type: ignore
from google.oauth2 import service_account # type: ignore
try:
OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault]
except AttributeError: # pragma: NO COVER
OptionalRetry = Union[retries.Retry, object] # type: ignore
from google.api_core import operation as gac_operation # type: ignore
from google.api_core import operation_async # type: ignore
from google.cloud.aiplatform_v1.services.metadata_service import pagers
from google.cloud.aiplatform_v1.types import artifact
from google.cloud.aiplatform_v1.types import artifact as gca_artifact
from google.cloud.aiplatform_v1.types import context
from google.cloud.aiplatform_v1.types import context as gca_context
from google.cloud.aiplatform_v1.types import encryption_spec
from google.cloud.aiplatform_v1.types import event
from google.cloud.aiplatform_v1.types import execution
from google.cloud.aiplatform_v1.types import execution as gca_execution
from google.cloud.aiplatform_v1.types import lineage_subgraph
from google.cloud.aiplatform_v1.types import metadata_schema
from google.cloud.aiplatform_v1.types import metadata_schema as gca_metadata_schema
from google.cloud.aiplatform_v1.types import metadata_service
from google.cloud.aiplatform_v1.types import metadata_store
from google.cloud.aiplatform_v1.types import metadata_store as gca_metadata_store
from google.cloud.aiplatform_v1.types import operation as gca_operation
from google.protobuf import empty_pb2 # type: ignore
from google.protobuf import field_mask_pb2 # type: ignore
from google.protobuf import struct_pb2 # type: ignore
from google.protobuf import timestamp_pb2 # type: ignore
from .transports.base import MetadataServiceTransport, DEFAULT_CLIENT_INFO
from .transports.grpc_asyncio import MetadataServiceGrpcAsyncIOTransport
from .client import MetadataServiceClient
class MetadataServiceAsyncClient:
"""Service for reading and writing metadata entries."""
_client: MetadataServiceClient
DEFAULT_ENDPOINT = MetadataServiceClient.DEFAULT_ENDPOINT
DEFAULT_MTLS_ENDPOINT = MetadataServiceClient.DEFAULT_MTLS_ENDPOINT
artifact_path = staticmethod(MetadataServiceClient.artifact_path)
parse_artifact_path = staticmethod(MetadataServiceClient.parse_artifact_path)
context_path = staticmethod(MetadataServiceClient.context_path)
parse_context_path = staticmethod(MetadataServiceClient.parse_context_path)
execution_path = staticmethod(MetadataServiceClient.execution_path)
parse_execution_path = staticmethod(MetadataServiceClient.parse_execution_path)
metadata_schema_path = staticmethod(MetadataServiceClient.metadata_schema_path)
parse_metadata_schema_path = staticmethod(
MetadataServiceClient.parse_metadata_schema_path
)
metadata_store_path = staticmethod(MetadataServiceClient.metadata_store_path)
parse_metadata_store_path = staticmethod(
MetadataServiceClient.parse_metadata_store_path
)
common_billing_account_path = staticmethod(
MetadataServiceClient.common_billing_account_path
)
parse_common_billing_account_path = staticmethod(
MetadataServiceClient.parse_common_billing_account_path
)
common_folder_path = staticmethod(MetadataServiceClient.common_folder_path)
parse_common_folder_path = staticmethod(
MetadataServiceClient.parse_common_folder_path
)
common_organization_path = staticmethod(
MetadataServiceClient.common_organization_path
)
parse_common_organization_path = staticmethod(
MetadataServiceClient.parse_common_organization_path
)
common_project_path = staticmethod(MetadataServiceClient.common_project_path)
parse_common_project_path = staticmethod(
MetadataServiceClient.parse_common_project_path
)
common_location_path = staticmethod(MetadataServiceClient.common_location_path)
parse_common_location_path = staticmethod(
MetadataServiceClient.parse_common_location_path
)
@classmethod
def from_service_account_info(cls, info: dict, *args, **kwargs):
"""Creates an instance of this client using the provided credentials
info.
Args:
info (dict): The service account private key info.
args: Additional arguments to pass to the constructor.
kwargs: Additional arguments to pass to the constructor.
Returns:
MetadataServiceAsyncClient: The constructed client.
"""
return MetadataServiceClient.from_service_account_info.__func__(MetadataServiceAsyncClient, info, *args, **kwargs) # type: ignore
@classmethod
def from_service_account_file(cls, filename: str, *args, **kwargs):
"""Creates an instance of this client using the provided credentials
file.
Args:
filename (str): The path to the service account private key json
file.
args: Additional arguments to pass to the constructor.
kwargs: Additional arguments to pass to the constructor.
Returns:
MetadataServiceAsyncClient: The constructed client.
"""
return MetadataServiceClient.from_service_account_file.__func__(MetadataServiceAsyncClient, filename, *args, **kwargs) # type: ignore
from_service_account_json = from_service_account_file
@classmethod
def get_mtls_endpoint_and_cert_source(
cls, client_options: Optional[ClientOptions] = None
):
"""Return the API endpoint and client cert source for mutual TLS.
The client cert source is determined in the following order:
(1) if `GOOGLE_API_USE_CLIENT_CERTIFICATE` environment variable is not "true", the
client cert source is None.
(2) if `client_options.client_cert_source` is provided, use the provided one; if the
default client cert source exists, use the default one; otherwise the client cert
source is None.
The API endpoint is determined in the following order:
        (1) if `client_options.api_endpoint` is provided, use the provided one.
(2) if `GOOGLE_API_USE_CLIENT_CERTIFICATE` environment variable is "always", use the
        default mTLS endpoint; if the environment variable is "never", use the default API
endpoint; otherwise if client cert source exists, use the default mTLS endpoint, otherwise
use the default API endpoint.
More details can be found at https://google.aip.dev/auth/4114.
Args:
client_options (google.api_core.client_options.ClientOptions): Custom options for the
client. Only the `api_endpoint` and `client_cert_source` properties may be used
in this method.
Returns:
Tuple[str, Callable[[], Tuple[bytes, bytes]]]: returns the API endpoint and the
client cert source to use.
Raises:
google.auth.exceptions.MutualTLSChannelError: If any errors happen.
"""
return MetadataServiceClient.get_mtls_endpoint_and_cert_source(client_options) # type: ignore
@property
def transport(self) -> MetadataServiceTransport:
"""Returns the transport used by the client instance.
Returns:
MetadataServiceTransport: The transport used by the client instance.
"""
return self._client.transport
get_transport_class = functools.partial(
type(MetadataServiceClient).get_transport_class, type(MetadataServiceClient)
)
def __init__(
self,
*,
credentials: ga_credentials.Credentials = None,
transport: Union[str, MetadataServiceTransport] = "grpc_asyncio",
client_options: ClientOptions = None,
client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
) -> None:
"""Instantiates the metadata service client.
Args:
credentials (Optional[google.auth.credentials.Credentials]): The
authorization credentials to attach to requests. These
credentials identify the application to the service; if none
are specified, the client will attempt to ascertain the
credentials from the environment.
transport (Union[str, ~.MetadataServiceTransport]): The
transport to use. If set to None, a transport is chosen
automatically.
client_options (ClientOptions): Custom options for the client. It
won't take effect if a ``transport`` instance is provided.
(1) The ``api_endpoint`` property can be used to override the
default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT
environment variable can also be used to override the endpoint:
"always" (always use the default mTLS endpoint), "never" (always
use the default regular endpoint) and "auto" (auto switch to the
default mTLS endpoint if client certificate is present, this is
the default value). However, the ``api_endpoint`` property takes
precedence if provided.
(2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable
is "true", then the ``client_cert_source`` property can be used
to provide client certificate for mutual TLS transport. If
not provided, the default SSL client certificate will be used if
present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not
set, no client certificate will be used.
Raises:
google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport
creation failed for any reason.
"""
self._client = MetadataServiceClient(
credentials=credentials,
transport=transport,
client_options=client_options,
client_info=client_info,
)
async def create_metadata_store(
self,
request: Union[metadata_service.CreateMetadataStoreRequest, dict] = None,
*,
parent: str = None,
metadata_store: gca_metadata_store.MetadataStore = None,
metadata_store_id: str = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> operation_async.AsyncOperation:
r"""Initializes a MetadataStore, including allocation of
resources.
.. code-block:: python
from google.cloud import aiplatform_v1
def sample_create_metadata_store():
# Create a client
client = aiplatform_v1.MetadataServiceClient()
# Initialize request argument(s)
request = aiplatform_v1.CreateMetadataStoreRequest(
parent="parent_value",
)
# Make the request
operation = client.create_metadata_store(request=request)
print("Waiting for operation to complete...")
response = operation.result()
# Handle the response
print(response)
Args:
request (Union[google.cloud.aiplatform_v1.types.CreateMetadataStoreRequest, dict]):
The request object. Request message for
[MetadataService.CreateMetadataStore][google.cloud.aiplatform.v1.MetadataService.CreateMetadataStore].
parent (:class:`str`):
Required. The resource name of the Location where the
MetadataStore should be created. Format:
``projects/{project}/locations/{location}/``
This corresponds to the ``parent`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
metadata_store (:class:`google.cloud.aiplatform_v1.types.MetadataStore`):
Required. The MetadataStore to
create.
This corresponds to the ``metadata_store`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
metadata_store_id (:class:`str`):
The {metadatastore} portion of the resource name with
the format:
``projects/{project}/locations/{location}/metadataStores/{metadatastore}``
If not provided, the MetadataStore's ID will be a UUID
generated by the service. Must be 4-128 characters in
length. Valid characters are ``/[a-z][0-9]-/``. Must be
unique across all MetadataStores in the parent Location.
(Otherwise the request will fail with ALREADY_EXISTS, or
PERMISSION_DENIED if the caller can't view the
preexisting MetadataStore.)
This corresponds to the ``metadata_store_id`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.api_core.operation_async.AsyncOperation:
An object representing a long-running operation.
The result type for the operation will be :class:`google.cloud.aiplatform_v1.types.MetadataStore` Instance of a metadata store. Contains a set of metadata that can be
queried.
"""
# Create or coerce a protobuf request object.
# Quick check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([parent, metadata_store, metadata_store_id])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
request = metadata_service.CreateMetadataStoreRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if parent is not None:
request.parent = parent
if metadata_store is not None:
request.metadata_store = metadata_store
if metadata_store_id is not None:
request.metadata_store_id = metadata_store_id
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = gapic_v1.method_async.wrap_method(
self._client._transport.create_metadata_store,
default_timeout=None,
client_info=DEFAULT_CLIENT_INFO,
)
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)),
)
# Send the request.
response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# Wrap the response in an operation future.
response = operation_async.from_gapic(
response,
self._client._transport.operations_client,
gca_metadata_store.MetadataStore,
metadata_type=metadata_service.CreateMetadataStoreOperationMetadata,
)
# Done; return the response.
return response
async def get_metadata_store(
self,
request: Union[metadata_service.GetMetadataStoreRequest, dict] = None,
*,
name: str = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> metadata_store.MetadataStore:
r"""Retrieves a specific MetadataStore.
.. code-block:: python
from google.cloud import aiplatform_v1
def sample_get_metadata_store():
# Create a client
client = aiplatform_v1.MetadataServiceClient()
# Initialize request argument(s)
request = aiplatform_v1.GetMetadataStoreRequest(
name="name_value",
)
# Make the request
response = client.get_metadata_store(request=request)
# Handle the response
print(response)
Args:
request (Union[google.cloud.aiplatform_v1.types.GetMetadataStoreRequest, dict]):
The request object. Request message for
[MetadataService.GetMetadataStore][google.cloud.aiplatform.v1.MetadataService.GetMetadataStore].
name (:class:`str`):
Required. The resource name of the MetadataStore to
retrieve. Format:
``projects/{project}/locations/{location}/metadataStores/{metadatastore}``
This corresponds to the ``name`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.aiplatform_v1.types.MetadataStore:
Instance of a metadata store.
Contains a set of metadata that can be
queried.
"""
# Create or coerce a protobuf request object.
# Quick check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([name])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
request = metadata_service.GetMetadataStoreRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if name is not None:
request.name = name
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = gapic_v1.method_async.wrap_method(
self._client._transport.get_metadata_store,
default_timeout=None,
client_info=DEFAULT_CLIENT_INFO,
)
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
)
# Send the request.
response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# Done; return the response.
return response
async def list_metadata_stores(
self,
request: Union[metadata_service.ListMetadataStoresRequest, dict] = None,
*,
parent: str = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> pagers.ListMetadataStoresAsyncPager:
r"""Lists MetadataStores for a Location.
.. code-block:: python
from google.cloud import aiplatform_v1
def sample_list_metadata_stores():
# Create a client
client = aiplatform_v1.MetadataServiceClient()
# Initialize request argument(s)
request = aiplatform_v1.ListMetadataStoresRequest(
parent="parent_value",
)
# Make the request
page_result = client.list_metadata_stores(request=request)
# Handle the response
for response in page_result:
print(response)
Args:
request (Union[google.cloud.aiplatform_v1.types.ListMetadataStoresRequest, dict]):
The request object. Request message for
[MetadataService.ListMetadataStores][google.cloud.aiplatform.v1.MetadataService.ListMetadataStores].
parent (:class:`str`):
Required. The Location whose MetadataStores should be
listed. Format:
``projects/{project}/locations/{location}``
This corresponds to the ``parent`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.aiplatform_v1.services.metadata_service.pagers.ListMetadataStoresAsyncPager:
Response message for
[MetadataService.ListMetadataStores][google.cloud.aiplatform.v1.MetadataService.ListMetadataStores].
Iterating over this object will yield results and
resolve additional pages automatically.
"""
# Create or coerce a protobuf request object.
# Quick check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([parent])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
request = metadata_service.ListMetadataStoresRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if parent is not None:
request.parent = parent
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = gapic_v1.method_async.wrap_method(
self._client._transport.list_metadata_stores,
default_timeout=None,
client_info=DEFAULT_CLIENT_INFO,
)
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)),
)
# Send the request.
response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# This method is paged; wrap the response in a pager, which provides
# an `__aiter__` convenience method.
response = pagers.ListMetadataStoresAsyncPager(
method=rpc, request=request, response=response, metadata=metadata,
)
# Done; return the response.
return response
async def delete_metadata_store(
self,
request: Union[metadata_service.DeleteMetadataStoreRequest, dict] = None,
*,
name: str = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> operation_async.AsyncOperation:
r"""Deletes a single MetadataStore and all its child
resources (Artifacts, Executions, and Contexts).
.. code-block:: python
from google.cloud import aiplatform_v1
def sample_delete_metadata_store():
# Create a client
client = aiplatform_v1.MetadataServiceClient()
# Initialize request argument(s)
request = aiplatform_v1.DeleteMetadataStoreRequest(
name="name_value",
)
# Make the request
operation = client.delete_metadata_store(request=request)
print("Waiting for operation to complete...")
response = operation.result()
# Handle the response
print(response)
Args:
request (Union[google.cloud.aiplatform_v1.types.DeleteMetadataStoreRequest, dict]):
The request object. Request message for
[MetadataService.DeleteMetadataStore][google.cloud.aiplatform.v1.MetadataService.DeleteMetadataStore].
name (:class:`str`):
Required. The resource name of the MetadataStore to
delete. Format:
``projects/{project}/locations/{location}/metadataStores/{metadatastore}``
This corresponds to the ``name`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.api_core.operation_async.AsyncOperation:
An object representing a long-running operation.
The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty` A generic empty message that you can re-use to avoid defining duplicated
empty messages in your APIs. A typical example is to
use it as the request or the response type of an API
method. For instance:
service Foo {
rpc Bar(google.protobuf.Empty) returns
(google.protobuf.Empty);
}
The JSON representation for Empty is empty JSON
object {}.
"""
# Create or coerce a protobuf request object.
# Quick check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([name])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
request = metadata_service.DeleteMetadataStoreRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if name is not None:
request.name = name
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = gapic_v1.method_async.wrap_method(
self._client._transport.delete_metadata_store,
default_timeout=None,
client_info=DEFAULT_CLIENT_INFO,
)
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
)
# Send the request.
response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# Wrap the response in an operation future.
response = operation_async.from_gapic(
response,
self._client._transport.operations_client,
empty_pb2.Empty,
metadata_type=metadata_service.DeleteMetadataStoreOperationMetadata,
)
# Done; return the response.
return response
async def create_artifact(
self,
request: Union[metadata_service.CreateArtifactRequest, dict] = None,
*,
parent: str = None,
artifact: gca_artifact.Artifact = None,
artifact_id: str = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> gca_artifact.Artifact:
r"""Creates an Artifact associated with a MetadataStore.
.. code-block:: python
from google.cloud import aiplatform_v1
def sample_create_artifact():
# Create a client
client = aiplatform_v1.MetadataServiceClient()
# Initialize request argument(s)
request = aiplatform_v1.CreateArtifactRequest(
parent="parent_value",
)
# Make the request
response = client.create_artifact(request=request)
# Handle the response
print(response)
Args:
request (Union[google.cloud.aiplatform_v1.types.CreateArtifactRequest, dict]):
The request object. Request message for
[MetadataService.CreateArtifact][google.cloud.aiplatform.v1.MetadataService.CreateArtifact].
parent (:class:`str`):
Required. The resource name of the MetadataStore where
the Artifact should be created. Format:
``projects/{project}/locations/{location}/metadataStores/{metadatastore}``
This corresponds to the ``parent`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
artifact (:class:`google.cloud.aiplatform_v1.types.Artifact`):
Required. The Artifact to create.
This corresponds to the ``artifact`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
artifact_id (:class:`str`):
The {artifact} portion of the resource name with the
format:
``projects/{project}/locations/{location}/metadataStores/{metadatastore}/artifacts/{artifact}``
If not provided, the Artifact's ID will be a UUID
generated by the service. Must be 4-128 characters in
length. Valid characters are ``/[a-z][0-9]-/``. Must be
unique across all Artifacts in the parent MetadataStore.
(Otherwise the request will fail with ALREADY_EXISTS, or
PERMISSION_DENIED if the caller can't view the
preexisting Artifact.)
This corresponds to the ``artifact_id`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.aiplatform_v1.types.Artifact:
Instance of a general artifact.
"""
# Create or coerce a protobuf request object.
# Quick check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([parent, artifact, artifact_id])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
request = metadata_service.CreateArtifactRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if parent is not None:
request.parent = parent
if artifact is not None:
request.artifact = artifact
if artifact_id is not None:
request.artifact_id = artifact_id
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = gapic_v1.method_async.wrap_method(
self._client._transport.create_artifact,
default_timeout=None,
client_info=DEFAULT_CLIENT_INFO,
)
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)),
)
# Send the request.
response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# Done; return the response.
return response
async def get_artifact(
self,
request: Union[metadata_service.GetArtifactRequest, dict] = None,
*,
name: str = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> artifact.Artifact:
r"""Retrieves a specific Artifact.
.. code-block:: python
from google.cloud import aiplatform_v1
def sample_get_artifact():
# Create a client
client = aiplatform_v1.MetadataServiceClient()
# Initialize request argument(s)
request = aiplatform_v1.GetArtifactRequest(
name="name_value",
)
# Make the request
response = client.get_artifact(request=request)
# Handle the response
print(response)
Args:
request (Union[google.cloud.aiplatform_v1.types.GetArtifactRequest, dict]):
The request object. Request message for
[MetadataService.GetArtifact][google.cloud.aiplatform.v1.MetadataService.GetArtifact].
name (:class:`str`):
Required. The resource name of the Artifact to retrieve.
Format:
``projects/{project}/locations/{location}/metadataStores/{metadatastore}/artifacts/{artifact}``
This corresponds to the ``name`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.aiplatform_v1.types.Artifact:
Instance of a general artifact.
"""
# Create or coerce a protobuf request object.
# Quick check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([name])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
request = metadata_service.GetArtifactRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if name is not None:
request.name = name
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = gapic_v1.method_async.wrap_method(
self._client._transport.get_artifact,
default_timeout=None,
client_info=DEFAULT_CLIENT_INFO,
)
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
)
# Send the request.
response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# Done; return the response.
return response
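    # Illustrative usage sketch (assumption: ``client`` is an initialized
    # MetadataServiceAsyncClient): because ``request`` is typed as
    # Union[GetArtifactRequest, dict], a plain dict with the same field names is
    # accepted and coerced by the constructor call above. The resource name is a
    # placeholder:
    #
    #     response = await client.get_artifact(
    #         request={
    #             "name": "projects/my-project/locations/us-central1/"
    #                     "metadataStores/default/artifacts/my-artifact-id",
    #         }
    #     )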
async def list_artifacts(
self,
request: Union[metadata_service.ListArtifactsRequest, dict] = None,
*,
parent: str = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> pagers.ListArtifactsAsyncPager:
r"""Lists Artifacts in the MetadataStore.
.. code-block:: python
from google.cloud import aiplatform_v1
            async def sample_list_artifacts():
                # Create a client
                client = aiplatform_v1.MetadataServiceAsyncClient()
                # Initialize request argument(s)
                request = aiplatform_v1.ListArtifactsRequest(
                    parent="parent_value",
                )
                # Make the request
                page_result = await client.list_artifacts(request=request)
                # Handle the response
                async for response in page_result:
                    print(response)
Args:
request (Union[google.cloud.aiplatform_v1.types.ListArtifactsRequest, dict]):
The request object. Request message for
[MetadataService.ListArtifacts][google.cloud.aiplatform.v1.MetadataService.ListArtifacts].
parent (:class:`str`):
Required. The MetadataStore whose Artifacts should be
listed. Format:
``projects/{project}/locations/{location}/metadataStores/{metadatastore}``
This corresponds to the ``parent`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.aiplatform_v1.services.metadata_service.pagers.ListArtifactsAsyncPager:
Response message for
[MetadataService.ListArtifacts][google.cloud.aiplatform.v1.MetadataService.ListArtifacts].
Iterating over this object will yield results and
resolve additional pages automatically.
"""
# Create or coerce a protobuf request object.
# Quick check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([parent])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
request = metadata_service.ListArtifactsRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if parent is not None:
request.parent = parent
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = gapic_v1.method_async.wrap_method(
self._client._transport.list_artifacts,
default_timeout=None,
client_info=DEFAULT_CLIENT_INFO,
)
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)),
)
# Send the request.
response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# This method is paged; wrap the response in a pager, which provides
# an `__aiter__` convenience method.
response = pagers.ListArtifactsAsyncPager(
method=rpc, request=request, response=response, metadata=metadata,
)
# Done; return the response.
return response
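    # Illustrative usage sketch (assumption: ``client`` and ``parent`` are set up
    # as in the docstring above): the returned ListArtifactsAsyncPager can be
    # consumed item by item with ``async for``, which fetches further pages on
    # demand, or page by page via its ``pages`` async iterator:
    #
    #     pager = await client.list_artifacts(parent=parent)
    #     async for artifact in pager:
    #         print(artifact.name)
    #
    #     pager = await client.list_artifacts(parent=parent)
    #     async for page in pager.pages:
    #         print(len(page.artifacts))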
async def update_artifact(
self,
request: Union[metadata_service.UpdateArtifactRequest, dict] = None,
*,
artifact: gca_artifact.Artifact = None,
update_mask: field_mask_pb2.FieldMask = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> gca_artifact.Artifact:
r"""Updates a stored Artifact.
.. code-block:: python
from google.cloud import aiplatform_v1
            async def sample_update_artifact():
                # Create a client
                client = aiplatform_v1.MetadataServiceAsyncClient()
                # Initialize request argument(s)
                request = aiplatform_v1.UpdateArtifactRequest(
                )
                # Make the request
                response = await client.update_artifact(request=request)
                # Handle the response
                print(response)
Args:
request (Union[google.cloud.aiplatform_v1.types.UpdateArtifactRequest, dict]):
The request object. Request message for
[MetadataService.UpdateArtifact][google.cloud.aiplatform.v1.MetadataService.UpdateArtifact].
artifact (:class:`google.cloud.aiplatform_v1.types.Artifact`):
Required. The Artifact containing updates. The
Artifact's
[Artifact.name][google.cloud.aiplatform.v1.Artifact.name]
field is used to identify the Artifact to be updated.
Format:
``projects/{project}/locations/{location}/metadataStores/{metadatastore}/artifacts/{artifact}``
This corresponds to the ``artifact`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
update_mask (:class:`google.protobuf.field_mask_pb2.FieldMask`):
Required. A FieldMask indicating
which fields should be updated.
Functionality of this field is not yet
supported.
This corresponds to the ``update_mask`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.aiplatform_v1.types.Artifact:
Instance of a general artifact.
"""
# Create or coerce a protobuf request object.
# Quick check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([artifact, update_mask])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
request = metadata_service.UpdateArtifactRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if artifact is not None:
request.artifact = artifact
if update_mask is not None:
request.update_mask = update_mask
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = gapic_v1.method_async.wrap_method(
self._client._transport.update_artifact,
default_timeout=None,
client_info=DEFAULT_CLIENT_INFO,
)
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata(
(("artifact.name", request.artifact.name),)
),
)
# Send the request.
response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# Done; return the response.
return response
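    # Illustrative usage sketch (assumptions: ``client`` is an initialized
    # MetadataServiceAsyncClient; ``description`` is just a plausible field path):
    # ``update_mask`` is a standard protobuf FieldMask naming the Artifact fields
    # to change, even though the docstring notes the mask is not yet honoured
    # server-side:
    #
    #     from google.protobuf import field_mask_pb2
    #     artifact = aiplatform_v1.Artifact(name=artifact_name, description="updated")
    #     updated = await client.update_artifact(
    #         artifact=artifact,
    #         update_mask=field_mask_pb2.FieldMask(paths=["description"]),
    #     )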
async def delete_artifact(
self,
request: Union[metadata_service.DeleteArtifactRequest, dict] = None,
*,
name: str = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> operation_async.AsyncOperation:
r"""Deletes an Artifact.
.. code-block:: python
from google.cloud import aiplatform_v1
            async def sample_delete_artifact():
                # Create a client
                client = aiplatform_v1.MetadataServiceAsyncClient()
                # Initialize request argument(s)
                request = aiplatform_v1.DeleteArtifactRequest(
                    name="name_value",
                )
                # Make the request
                operation = await client.delete_artifact(request=request)
                print("Waiting for operation to complete...")
                response = await operation.result()
                # Handle the response
                print(response)
Args:
request (Union[google.cloud.aiplatform_v1.types.DeleteArtifactRequest, dict]):
The request object. Request message for
[MetadataService.DeleteArtifact][google.cloud.aiplatform.v1.MetadataService.DeleteArtifact].
name (:class:`str`):
Required. The resource name of the Artifact to delete.
Format:
``projects/{project}/locations/{location}/metadataStores/{metadatastore}/artifacts/{artifact}``
This corresponds to the ``name`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.api_core.operation_async.AsyncOperation:
An object representing a long-running operation.
The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty` A generic empty message that you can re-use to avoid defining duplicated
empty messages in your APIs. A typical example is to
use it as the request or the response type of an API
method. For instance:
service Foo {
rpc Bar(google.protobuf.Empty) returns
(google.protobuf.Empty);
}
The JSON representation for Empty is empty JSON
object {}.
"""
# Create or coerce a protobuf request object.
# Quick check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([name])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
request = metadata_service.DeleteArtifactRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if name is not None:
request.name = name
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = gapic_v1.method_async.wrap_method(
self._client._transport.delete_artifact,
default_timeout=None,
client_info=DEFAULT_CLIENT_INFO,
)
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
)
# Send the request.
response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# Wrap the response in an operation future.
response = operation_async.from_gapic(
response,
self._client._transport.operations_client,
empty_pb2.Empty,
metadata_type=gca_operation.DeleteOperationMetadata,
)
# Done; return the response.
return response
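    # Illustrative usage sketch (assumption: ``client`` and ``artifact_name`` are
    # defined): like every method on this client, the call accepts a per-invocation
    # ``timeout`` in seconds and extra gRPC ``metadata`` pairs, which are combined
    # with the routing header added above:
    #
    #     operation = await client.delete_artifact(
    #         name=artifact_name,
    #         timeout=60.0,
    #         metadata=(("custom-header", "demo"),),
    #     )
    #     await operation.result()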
async def purge_artifacts(
self,
request: Union[metadata_service.PurgeArtifactsRequest, dict] = None,
*,
parent: str = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> operation_async.AsyncOperation:
r"""Purges Artifacts.
.. code-block:: python
from google.cloud import aiplatform_v1
            async def sample_purge_artifacts():
                # Create a client
                client = aiplatform_v1.MetadataServiceAsyncClient()
                # Initialize request argument(s)
                request = aiplatform_v1.PurgeArtifactsRequest(
                    parent="parent_value",
                    filter="filter_value",
                )
                # Make the request
                operation = await client.purge_artifacts(request=request)
                print("Waiting for operation to complete...")
                response = await operation.result()
                # Handle the response
                print(response)
Args:
request (Union[google.cloud.aiplatform_v1.types.PurgeArtifactsRequest, dict]):
The request object. Request message for
[MetadataService.PurgeArtifacts][google.cloud.aiplatform.v1.MetadataService.PurgeArtifacts].
parent (:class:`str`):
Required. The metadata store to purge Artifacts from.
Format:
``projects/{project}/locations/{location}/metadataStores/{metadatastore}``
This corresponds to the ``parent`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.api_core.operation_async.AsyncOperation:
An object representing a long-running operation.
The result type for the operation will be
:class:`google.cloud.aiplatform_v1.types.PurgeArtifactsResponse`
Response message for
[MetadataService.PurgeArtifacts][google.cloud.aiplatform.v1.MetadataService.PurgeArtifacts].
"""
# Create or coerce a protobuf request object.
# Quick check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([parent])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
request = metadata_service.PurgeArtifactsRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if parent is not None:
request.parent = parent
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = gapic_v1.method_async.wrap_method(
self._client._transport.purge_artifacts,
default_timeout=None,
client_info=DEFAULT_CLIENT_INFO,
)
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)),
)
# Send the request.
response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# Wrap the response in an operation future.
response = operation_async.from_gapic(
response,
self._client._transport.operations_client,
metadata_service.PurgeArtifactsResponse,
metadata_type=metadata_service.PurgeArtifactsMetadata,
)
# Done; return the response.
return response
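    # Illustrative usage sketch (assumptions: ``client`` and ``parent`` are defined;
    # the ``force`` flag and ``purge_count`` field are taken from the v1 API
    # reference and should be confirmed against the proto definitions): with
    # ``force`` left False the purge is a dry run that only reports how many
    # Artifacts matched ``filter``:
    #
    #     operation = await client.purge_artifacts(
    #         request=aiplatform_v1.PurgeArtifactsRequest(
    #             parent=parent,
    #             filter='schema_title = "system.Model"',
    #             force=False,  # dry run; set True to actually delete
    #         )
    #     )
    #     print((await operation.result()).purge_count)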
async def create_context(
self,
request: Union[metadata_service.CreateContextRequest, dict] = None,
*,
parent: str = None,
context: gca_context.Context = None,
context_id: str = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> gca_context.Context:
r"""Creates a Context associated with a MetadataStore.
.. code-block:: python
from google.cloud import aiplatform_v1
            async def sample_create_context():
                # Create a client
                client = aiplatform_v1.MetadataServiceAsyncClient()
                # Initialize request argument(s)
                request = aiplatform_v1.CreateContextRequest(
                    parent="parent_value",
                )
                # Make the request
                response = await client.create_context(request=request)
                # Handle the response
                print(response)
Args:
request (Union[google.cloud.aiplatform_v1.types.CreateContextRequest, dict]):
The request object. Request message for
[MetadataService.CreateContext][google.cloud.aiplatform.v1.MetadataService.CreateContext].
parent (:class:`str`):
Required. The resource name of the MetadataStore where
the Context should be created. Format:
``projects/{project}/locations/{location}/metadataStores/{metadatastore}``
This corresponds to the ``parent`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
context (:class:`google.cloud.aiplatform_v1.types.Context`):
Required. The Context to create.
This corresponds to the ``context`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
context_id (:class:`str`):
The {context} portion of the resource name with the
format:
``projects/{project}/locations/{location}/metadataStores/{metadatastore}/contexts/{context}``.
If not provided, the Context's ID will be a UUID
generated by the service. Must be 4-128 characters in
length. Valid characters are ``/[a-z][0-9]-/``. Must be
unique across all Contexts in the parent MetadataStore.
(Otherwise the request will fail with ALREADY_EXISTS, or
PERMISSION_DENIED if the caller can't view the
preexisting Context.)
This corresponds to the ``context_id`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.aiplatform_v1.types.Context:
Instance of a general context.
"""
# Create or coerce a protobuf request object.
# Quick check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([parent, context, context_id])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
request = metadata_service.CreateContextRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if parent is not None:
request.parent = parent
if context is not None:
request.context = context
if context_id is not None:
request.context_id = context_id
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = gapic_v1.method_async.wrap_method(
self._client._transport.create_context,
default_timeout=None,
client_info=DEFAULT_CLIENT_INFO,
)
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)),
)
# Send the request.
response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# Done; return the response.
return response
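    # Illustrative usage sketch (assumption: ``client`` and ``parent`` are defined):
    # supplying ``context_id`` fixes the {context} segment of the resource name, so
    # a second create with the same ID fails with ALREADY_EXISTS, surfaced as
    # google.api_core.exceptions.AlreadyExists:
    #
    #     from google.api_core import exceptions
    #     try:
    #         ctx = await client.create_context(
    #             parent=parent,
    #             context=aiplatform_v1.Context(display_name="experiment-1"),
    #             context_id="experiment-1",
    #         )
    #     except exceptions.AlreadyExists:
    #         ctx = await client.get_context(name=f"{parent}/contexts/experiment-1")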
async def get_context(
self,
request: Union[metadata_service.GetContextRequest, dict] = None,
*,
name: str = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> context.Context:
r"""Retrieves a specific Context.
.. code-block:: python
from google.cloud import aiplatform_v1
            async def sample_get_context():
                # Create a client
                client = aiplatform_v1.MetadataServiceAsyncClient()
                # Initialize request argument(s)
                request = aiplatform_v1.GetContextRequest(
                    name="name_value",
                )
                # Make the request
                response = await client.get_context(request=request)
                # Handle the response
                print(response)
Args:
request (Union[google.cloud.aiplatform_v1.types.GetContextRequest, dict]):
The request object. Request message for
[MetadataService.GetContext][google.cloud.aiplatform.v1.MetadataService.GetContext].
name (:class:`str`):
Required. The resource name of the Context to retrieve.
Format:
``projects/{project}/locations/{location}/metadataStores/{metadatastore}/contexts/{context}``
This corresponds to the ``name`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.aiplatform_v1.types.Context:
Instance of a general context.
"""
# Create or coerce a protobuf request object.
# Quick check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([name])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
request = metadata_service.GetContextRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if name is not None:
request.name = name
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = gapic_v1.method_async.wrap_method(
self._client._transport.get_context,
default_timeout=None,
client_info=DEFAULT_CLIENT_INFO,
)
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
)
# Send the request.
response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# Done; return the response.
return response
async def list_contexts(
self,
request: Union[metadata_service.ListContextsRequest, dict] = None,
*,
parent: str = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> pagers.ListContextsAsyncPager:
r"""Lists Contexts on the MetadataStore.
.. code-block:: python
from google.cloud import aiplatform_v1
            async def sample_list_contexts():
                # Create a client
                client = aiplatform_v1.MetadataServiceAsyncClient()
                # Initialize request argument(s)
                request = aiplatform_v1.ListContextsRequest(
                    parent="parent_value",
                )
                # Make the request
                page_result = await client.list_contexts(request=request)
                # Handle the response
                async for response in page_result:
                    print(response)
Args:
request (Union[google.cloud.aiplatform_v1.types.ListContextsRequest, dict]):
The request object. Request message for
[MetadataService.ListContexts][google.cloud.aiplatform.v1.MetadataService.ListContexts]
parent (:class:`str`):
Required. The MetadataStore whose Contexts should be
listed. Format:
``projects/{project}/locations/{location}/metadataStores/{metadatastore}``
This corresponds to the ``parent`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.aiplatform_v1.services.metadata_service.pagers.ListContextsAsyncPager:
Response message for
[MetadataService.ListContexts][google.cloud.aiplatform.v1.MetadataService.ListContexts].
Iterating over this object will yield results and
resolve additional pages automatically.
"""
# Create or coerce a protobuf request object.
# Quick check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([parent])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
request = metadata_service.ListContextsRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if parent is not None:
request.parent = parent
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = gapic_v1.method_async.wrap_method(
self._client._transport.list_contexts,
default_timeout=None,
client_info=DEFAULT_CLIENT_INFO,
)
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)),
)
# Send the request.
response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# This method is paged; wrap the response in a pager, which provides
# an `__aiter__` convenience method.
response = pagers.ListContextsAsyncPager(
method=rpc, request=request, response=response, metadata=metadata,
)
# Done; return the response.
return response
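    # Illustrative usage sketch (assumptions: ``client`` and ``parent`` are defined;
    # the filter grammar shown follows the Metadata service docs and should be
    # treated as an example, not a guarantee): ListContextsRequest also exposes
    # ``page_size``, ``page_token`` and ``filter``:
    #
    #     pager = await client.list_contexts(
    #         request=aiplatform_v1.ListContextsRequest(
    #             parent=parent,
    #             page_size=50,
    #             filter='display_name = "experiment-1"',
    #         )
    #     )
    #     async for context in pager:
    #         print(context.name)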
async def update_context(
self,
request: Union[metadata_service.UpdateContextRequest, dict] = None,
*,
context: gca_context.Context = None,
update_mask: field_mask_pb2.FieldMask = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> gca_context.Context:
r"""Updates a stored Context.
.. code-block:: python
from google.cloud import aiplatform_v1
            async def sample_update_context():
                # Create a client
                client = aiplatform_v1.MetadataServiceAsyncClient()
                # Initialize request argument(s)
                request = aiplatform_v1.UpdateContextRequest(
                )
                # Make the request
                response = await client.update_context(request=request)
                # Handle the response
                print(response)
Args:
request (Union[google.cloud.aiplatform_v1.types.UpdateContextRequest, dict]):
The request object. Request message for
[MetadataService.UpdateContext][google.cloud.aiplatform.v1.MetadataService.UpdateContext].
context (:class:`google.cloud.aiplatform_v1.types.Context`):
Required. The Context containing updates. The Context's
[Context.name][google.cloud.aiplatform.v1.Context.name]
field is used to identify the Context to be updated.
Format:
``projects/{project}/locations/{location}/metadataStores/{metadatastore}/contexts/{context}``
This corresponds to the ``context`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
update_mask (:class:`google.protobuf.field_mask_pb2.FieldMask`):
Required. A FieldMask indicating
which fields should be updated.
Functionality of this field is not yet
supported.
This corresponds to the ``update_mask`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.aiplatform_v1.types.Context:
Instance of a general context.
"""
# Create or coerce a protobuf request object.
# Quick check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([context, update_mask])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
request = metadata_service.UpdateContextRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if context is not None:
request.context = context
if update_mask is not None:
request.update_mask = update_mask
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = gapic_v1.method_async.wrap_method(
self._client._transport.update_context,
default_timeout=None,
client_info=DEFAULT_CLIENT_INFO,
)
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata(
(("context.name", request.context.name),)
),
)
# Send the request.
response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# Done; return the response.
return response
async def delete_context(
self,
request: Union[metadata_service.DeleteContextRequest, dict] = None,
*,
name: str = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> operation_async.AsyncOperation:
r"""Deletes a stored Context.
.. code-block:: python
from google.cloud import aiplatform_v1
            async def sample_delete_context():
                # Create a client
                client = aiplatform_v1.MetadataServiceAsyncClient()
                # Initialize request argument(s)
                request = aiplatform_v1.DeleteContextRequest(
                    name="name_value",
                )
                # Make the request
                operation = await client.delete_context(request=request)
                print("Waiting for operation to complete...")
                response = await operation.result()
                # Handle the response
                print(response)
Args:
request (Union[google.cloud.aiplatform_v1.types.DeleteContextRequest, dict]):
The request object. Request message for
[MetadataService.DeleteContext][google.cloud.aiplatform.v1.MetadataService.DeleteContext].
name (:class:`str`):
Required. The resource name of the Context to delete.
Format:
``projects/{project}/locations/{location}/metadataStores/{metadatastore}/contexts/{context}``
This corresponds to the ``name`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.api_core.operation_async.AsyncOperation:
An object representing a long-running operation.
The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty` A generic empty message that you can re-use to avoid defining duplicated
empty messages in your APIs. A typical example is to
use it as the request or the response type of an API
method. For instance:
service Foo {
rpc Bar(google.protobuf.Empty) returns
(google.protobuf.Empty);
}
The JSON representation for Empty is empty JSON
object {}.
"""
# Create or coerce a protobuf request object.
# Quick check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([name])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
request = metadata_service.DeleteContextRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if name is not None:
request.name = name
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = gapic_v1.method_async.wrap_method(
self._client._transport.delete_context,
default_timeout=None,
client_info=DEFAULT_CLIENT_INFO,
)
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
)
# Send the request.
response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# Wrap the response in an operation future.
response = operation_async.from_gapic(
response,
self._client._transport.operations_client,
empty_pb2.Empty,
metadata_type=gca_operation.DeleteOperationMetadata,
)
# Done; return the response.
return response
async def purge_contexts(
self,
request: Union[metadata_service.PurgeContextsRequest, dict] = None,
*,
parent: str = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> operation_async.AsyncOperation:
r"""Purges Contexts.
.. code-block:: python
from google.cloud import aiplatform_v1
            async def sample_purge_contexts():
                # Create a client
                client = aiplatform_v1.MetadataServiceAsyncClient()
                # Initialize request argument(s)
                request = aiplatform_v1.PurgeContextsRequest(
                    parent="parent_value",
                    filter="filter_value",
                )
                # Make the request
                operation = await client.purge_contexts(request=request)
                print("Waiting for operation to complete...")
                response = await operation.result()
                # Handle the response
                print(response)
Args:
request (Union[google.cloud.aiplatform_v1.types.PurgeContextsRequest, dict]):
The request object. Request message for
[MetadataService.PurgeContexts][google.cloud.aiplatform.v1.MetadataService.PurgeContexts].
parent (:class:`str`):
Required. The metadata store to purge Contexts from.
Format:
``projects/{project}/locations/{location}/metadataStores/{metadatastore}``
This corresponds to the ``parent`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.api_core.operation_async.AsyncOperation:
An object representing a long-running operation.
The result type for the operation will be
:class:`google.cloud.aiplatform_v1.types.PurgeContextsResponse`
Response message for
[MetadataService.PurgeContexts][google.cloud.aiplatform.v1.MetadataService.PurgeContexts].
"""
# Create or coerce a protobuf request object.
# Quick check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([parent])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
request = metadata_service.PurgeContextsRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if parent is not None:
request.parent = parent
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = gapic_v1.method_async.wrap_method(
self._client._transport.purge_contexts,
default_timeout=None,
client_info=DEFAULT_CLIENT_INFO,
)
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)),
)
# Send the request.
response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# Wrap the response in an operation future.
response = operation_async.from_gapic(
response,
self._client._transport.operations_client,
metadata_service.PurgeContextsResponse,
metadata_type=metadata_service.PurgeContextsMetadata,
)
# Done; return the response.
return response
async def add_context_artifacts_and_executions(
self,
request: Union[
metadata_service.AddContextArtifactsAndExecutionsRequest, dict
] = None,
*,
context: str = None,
artifacts: Sequence[str] = None,
executions: Sequence[str] = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> metadata_service.AddContextArtifactsAndExecutionsResponse:
r"""Adds a set of Artifacts and Executions to a Context.
If any of the Artifacts or Executions have already been
added to a Context, they are simply skipped.
.. code-block:: python
from google.cloud import aiplatform_v1
            async def sample_add_context_artifacts_and_executions():
                # Create a client
                client = aiplatform_v1.MetadataServiceAsyncClient()
                # Initialize request argument(s)
                request = aiplatform_v1.AddContextArtifactsAndExecutionsRequest(
                    context="context_value",
                )
                # Make the request
                response = await client.add_context_artifacts_and_executions(request=request)
                # Handle the response
                print(response)
Args:
request (Union[google.cloud.aiplatform_v1.types.AddContextArtifactsAndExecutionsRequest, dict]):
The request object. Request message for
[MetadataService.AddContextArtifactsAndExecutions][google.cloud.aiplatform.v1.MetadataService.AddContextArtifactsAndExecutions].
context (:class:`str`):
Required. The resource name of the Context that the
Artifacts and Executions belong to. Format:
``projects/{project}/locations/{location}/metadataStores/{metadatastore}/contexts/{context}``
This corresponds to the ``context`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
artifacts (:class:`Sequence[str]`):
The resource names of the Artifacts to attribute to the
Context.
Format:
``projects/{project}/locations/{location}/metadataStores/{metadatastore}/artifacts/{artifact}``
This corresponds to the ``artifacts`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
executions (:class:`Sequence[str]`):
The resource names of the Executions to associate with
the Context.
Format:
``projects/{project}/locations/{location}/metadataStores/{metadatastore}/executions/{execution}``
This corresponds to the ``executions`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.aiplatform_v1.types.AddContextArtifactsAndExecutionsResponse:
Response message for
[MetadataService.AddContextArtifactsAndExecutions][google.cloud.aiplatform.v1.MetadataService.AddContextArtifactsAndExecutions].
"""
# Create or coerce a protobuf request object.
# Quick check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([context, artifacts, executions])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
request = metadata_service.AddContextArtifactsAndExecutionsRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if context is not None:
request.context = context
if artifacts:
request.artifacts.extend(artifacts)
if executions:
request.executions.extend(executions)
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = gapic_v1.method_async.wrap_method(
self._client._transport.add_context_artifacts_and_executions,
default_timeout=None,
client_info=DEFAULT_CLIENT_INFO,
)
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("context", request.context),)),
)
# Send the request.
response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# Done; return the response.
return response
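    # Illustrative usage sketch (assumptions: ``client``, ``context_name`` and the
    # path-helper static methods mirrored from the sync client are available): the
    # ``artifacts``/``executions`` arguments take full resource names, which the
    # generated path helpers can build:
    #
    #     artifact_name = client.artifact_path(
    #         "my-project", "us-central1", "default", "my-artifact-id"
    #     )
    #     execution_name = client.execution_path(
    #         "my-project", "us-central1", "default", "my-execution-id"
    #     )
    #     await client.add_context_artifacts_and_executions(
    #         context=context_name,
    #         artifacts=[artifact_name],
    #         executions=[execution_name],
    #     )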
async def add_context_children(
self,
request: Union[metadata_service.AddContextChildrenRequest, dict] = None,
*,
context: str = None,
child_contexts: Sequence[str] = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> metadata_service.AddContextChildrenResponse:
r"""Adds a set of Contexts as children to a parent Context. If any
of the child Contexts have already been added to the parent
Context, they are simply skipped. If this call would create a
cycle or cause any Context to have more than 10 parents, the
request will fail with an INVALID_ARGUMENT error.
.. code-block:: python
from google.cloud import aiplatform_v1
            async def sample_add_context_children():
                # Create a client
                client = aiplatform_v1.MetadataServiceAsyncClient()
                # Initialize request argument(s)
                request = aiplatform_v1.AddContextChildrenRequest(
                    context="context_value",
                )
                # Make the request
                response = await client.add_context_children(request=request)
                # Handle the response
                print(response)
Args:
request (Union[google.cloud.aiplatform_v1.types.AddContextChildrenRequest, dict]):
The request object. Request message for
[MetadataService.AddContextChildren][google.cloud.aiplatform.v1.MetadataService.AddContextChildren].
context (:class:`str`):
Required. The resource name of the parent Context.
Format:
``projects/{project}/locations/{location}/metadataStores/{metadatastore}/contexts/{context}``
This corresponds to the ``context`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
child_contexts (:class:`Sequence[str]`):
The resource names of the child
Contexts.
This corresponds to the ``child_contexts`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.aiplatform_v1.types.AddContextChildrenResponse:
Response message for
[MetadataService.AddContextChildren][google.cloud.aiplatform.v1.MetadataService.AddContextChildren].
"""
# Create or coerce a protobuf request object.
# Quick check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([context, child_contexts])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
request = metadata_service.AddContextChildrenRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if context is not None:
request.context = context
if child_contexts:
request.child_contexts.extend(child_contexts)
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = gapic_v1.method_async.wrap_method(
self._client._transport.add_context_children,
default_timeout=None,
client_info=DEFAULT_CLIENT_INFO,
)
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("context", request.context),)),
)
# Send the request.
response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# Done; return the response.
return response
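    # Illustrative usage sketch (assumption: ``client``, ``parent_context_name`` and
    # ``child_context_name`` are defined): as documented above, a request that would
    # create a cycle or give a Context more than 10 parents is rejected with
    # INVALID_ARGUMENT, i.e. google.api_core.exceptions.InvalidArgument:
    #
    #     from google.api_core import exceptions
    #     try:
    #         await client.add_context_children(
    #             context=parent_context_name,
    #             child_contexts=[child_context_name],
    #         )
    #     except exceptions.InvalidArgument as exc:
    #         print(f"rejected: {exc}")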
async def query_context_lineage_subgraph(
self,
request: Union[
metadata_service.QueryContextLineageSubgraphRequest, dict
] = None,
*,
context: str = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> lineage_subgraph.LineageSubgraph:
r"""Retrieves Artifacts and Executions within the
specified Context, connected by Event edges and returned
as a LineageSubgraph.
.. code-block:: python
from google.cloud import aiplatform_v1
            async def sample_query_context_lineage_subgraph():
                # Create a client
                client = aiplatform_v1.MetadataServiceAsyncClient()
                # Initialize request argument(s)
                request = aiplatform_v1.QueryContextLineageSubgraphRequest(
                    context="context_value",
                )
                # Make the request
                response = await client.query_context_lineage_subgraph(request=request)
                # Handle the response
                print(response)
Args:
request (Union[google.cloud.aiplatform_v1.types.QueryContextLineageSubgraphRequest, dict]):
The request object. Request message for
[MetadataService.QueryContextLineageSubgraph][google.cloud.aiplatform.v1.MetadataService.QueryContextLineageSubgraph].
context (:class:`str`):
Required. The resource name of the Context whose
Artifacts and Executions should be retrieved as a
LineageSubgraph. Format:
``projects/{project}/locations/{location}/metadataStores/{metadatastore}/contexts/{context}``
The request may error with FAILED_PRECONDITION if the
number of Artifacts, the number of Executions, or the
number of Events that would be returned for the Context
exceeds 1000.
This corresponds to the ``context`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.aiplatform_v1.types.LineageSubgraph:
A subgraph of the overall lineage
graph. Event edges connect Artifact and
Execution nodes.
"""
# Create or coerce a protobuf request object.
# Quick check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([context])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
request = metadata_service.QueryContextLineageSubgraphRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if context is not None:
request.context = context
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = gapic_v1.method_async.wrap_method(
self._client._transport.query_context_lineage_subgraph,
default_timeout=None,
client_info=DEFAULT_CLIENT_INFO,
)
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("context", request.context),)),
)
# Send the request.
response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# Done; return the response.
return response
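    # Illustrative usage sketch (assumption: ``client`` and ``context_name`` are
    # defined; field names follow the v1 LineageSubgraph/Event messages): the
    # response groups results into repeated ``artifacts``, ``executions`` and
    # ``events`` fields, where each Event links an artifact to an execution:
    #
    #     subgraph = await client.query_context_lineage_subgraph(context=context_name)
    #     print(len(subgraph.artifacts), len(subgraph.executions))
    #     for event in subgraph.events:
    #         print(event.artifact, "->", event.execution, event.type_)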
async def create_execution(
self,
request: Union[metadata_service.CreateExecutionRequest, dict] = None,
*,
parent: str = None,
execution: gca_execution.Execution = None,
execution_id: str = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> gca_execution.Execution:
r"""Creates an Execution associated with a MetadataStore.
.. code-block:: python
from google.cloud import aiplatform_v1
            async def sample_create_execution():
                # Create a client
                client = aiplatform_v1.MetadataServiceAsyncClient()
                # Initialize request argument(s)
                request = aiplatform_v1.CreateExecutionRequest(
                    parent="parent_value",
                )
                # Make the request
                response = await client.create_execution(request=request)
                # Handle the response
                print(response)
Args:
request (Union[google.cloud.aiplatform_v1.types.CreateExecutionRequest, dict]):
The request object. Request message for
[MetadataService.CreateExecution][google.cloud.aiplatform.v1.MetadataService.CreateExecution].
parent (:class:`str`):
Required. The resource name of the MetadataStore where
the Execution should be created. Format:
``projects/{project}/locations/{location}/metadataStores/{metadatastore}``
This corresponds to the ``parent`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
execution (:class:`google.cloud.aiplatform_v1.types.Execution`):
Required. The Execution to create.
This corresponds to the ``execution`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
execution_id (:class:`str`):
The {execution} portion of the resource name with the
format:
``projects/{project}/locations/{location}/metadataStores/{metadatastore}/executions/{execution}``
If not provided, the Execution's ID will be a UUID
generated by the service. Must be 4-128 characters in
length. Valid characters are ``/[a-z][0-9]-/``. Must be
unique across all Executions in the parent
MetadataStore. (Otherwise the request will fail with
ALREADY_EXISTS, or PERMISSION_DENIED if the caller can't
view the preexisting Execution.)
This corresponds to the ``execution_id`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.aiplatform_v1.types.Execution:
Instance of a general execution.
"""
# Create or coerce a protobuf request object.
# Quick check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([parent, execution, execution_id])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
request = metadata_service.CreateExecutionRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if parent is not None:
request.parent = parent
if execution is not None:
request.execution = execution
if execution_id is not None:
request.execution_id = execution_id
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = gapic_v1.method_async.wrap_method(
self._client._transport.create_execution,
default_timeout=None,
client_info=DEFAULT_CLIENT_INFO,
)
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)),
)
# Send the request.
response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# Done; return the response.
return response
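    # Illustrative usage sketch (assumption: ``client`` and ``parent`` are defined;
    # the enum value is taken from the v1 Execution.State message): an Execution can
    # be created with an initial lifecycle state and a caller-chosen ID:
    #
    #     execution = aiplatform_v1.Execution(
    #         display_name="training-run",
    #         state=aiplatform_v1.Execution.State.RUNNING,
    #     )
    #     created = await client.create_execution(
    #         parent=parent,
    #         execution=execution,
    #         execution_id="training-run-1",
    #     )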
async def get_execution(
self,
request: Union[metadata_service.GetExecutionRequest, dict] = None,
*,
name: str = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> execution.Execution:
r"""Retrieves a specific Execution.
.. code-block:: python
from google.cloud import aiplatform_v1
            async def sample_get_execution():
                # Create a client
                client = aiplatform_v1.MetadataServiceAsyncClient()
                # Initialize request argument(s)
                request = aiplatform_v1.GetExecutionRequest(
                    name="name_value",
                )
                # Make the request
                response = await client.get_execution(request=request)
                # Handle the response
                print(response)
Args:
request (Union[google.cloud.aiplatform_v1.types.GetExecutionRequest, dict]):
The request object. Request message for
[MetadataService.GetExecution][google.cloud.aiplatform.v1.MetadataService.GetExecution].
name (:class:`str`):
Required. The resource name of the Execution to
retrieve. Format:
``projects/{project}/locations/{location}/metadataStores/{metadatastore}/executions/{execution}``
This corresponds to the ``name`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.aiplatform_v1.types.Execution:
Instance of a general execution.
"""
# Create or coerce a protobuf request object.
# Quick check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([name])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
request = metadata_service.GetExecutionRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if name is not None:
request.name = name
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = gapic_v1.method_async.wrap_method(
self._client._transport.get_execution,
default_timeout=None,
client_info=DEFAULT_CLIENT_INFO,
)
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
)
# Send the request.
response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# Done; return the response.
return response
async def list_executions(
self,
request: Union[metadata_service.ListExecutionsRequest, dict] = None,
*,
parent: str = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> pagers.ListExecutionsAsyncPager:
r"""Lists Executions in the MetadataStore.
.. code-block:: python
from google.cloud import aiplatform_v1
            async def sample_list_executions():
                # Create a client
                client = aiplatform_v1.MetadataServiceAsyncClient()
                # Initialize request argument(s)
                request = aiplatform_v1.ListExecutionsRequest(
                    parent="parent_value",
                )
                # Make the request
                page_result = await client.list_executions(request=request)
                # Handle the response
                async for response in page_result:
                    print(response)
Args:
request (Union[google.cloud.aiplatform_v1.types.ListExecutionsRequest, dict]):
The request object. Request message for
[MetadataService.ListExecutions][google.cloud.aiplatform.v1.MetadataService.ListExecutions].
parent (:class:`str`):
Required. The MetadataStore whose Executions should be
listed. Format:
``projects/{project}/locations/{location}/metadataStores/{metadatastore}``
This corresponds to the ``parent`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.aiplatform_v1.services.metadata_service.pagers.ListExecutionsAsyncPager:
Response message for
[MetadataService.ListExecutions][google.cloud.aiplatform.v1.MetadataService.ListExecutions].
Iterating over this object will yield results and
resolve additional pages automatically.
"""
# Create or coerce a protobuf request object.
# Quick check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([parent])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
request = metadata_service.ListExecutionsRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if parent is not None:
request.parent = parent
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = gapic_v1.method_async.wrap_method(
self._client._transport.list_executions,
default_timeout=None,
client_info=DEFAULT_CLIENT_INFO,
)
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)),
)
# Send the request.
response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# This method is paged; wrap the response in a pager, which provides
# an `__aiter__` convenience method.
response = pagers.ListExecutionsAsyncPager(
method=rpc, request=request, response=response, metadata=metadata,
)
# Done; return the response.
return response
async def update_execution(
self,
request: Union[metadata_service.UpdateExecutionRequest, dict] = None,
*,
execution: gca_execution.Execution = None,
update_mask: field_mask_pb2.FieldMask = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> gca_execution.Execution:
r"""Updates a stored Execution.
.. code-block:: python
from google.cloud import aiplatform_v1
            async def sample_update_execution():
                # Create a client
                client = aiplatform_v1.MetadataServiceAsyncClient()
                # Initialize request argument(s)
                request = aiplatform_v1.UpdateExecutionRequest(
                )
                # Make the request
                response = await client.update_execution(request=request)
                # Handle the response
                print(response)
Args:
request (Union[google.cloud.aiplatform_v1.types.UpdateExecutionRequest, dict]):
The request object. Request message for
[MetadataService.UpdateExecution][google.cloud.aiplatform.v1.MetadataService.UpdateExecution].
execution (:class:`google.cloud.aiplatform_v1.types.Execution`):
Required. The Execution containing updates. The
Execution's
[Execution.name][google.cloud.aiplatform.v1.Execution.name]
field is used to identify the Execution to be updated.
Format:
``projects/{project}/locations/{location}/metadataStores/{metadatastore}/executions/{execution}``
This corresponds to the ``execution`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
update_mask (:class:`google.protobuf.field_mask_pb2.FieldMask`):
Required. A FieldMask indicating
which fields should be updated.
Functionality of this field is not yet
supported.
This corresponds to the ``update_mask`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.aiplatform_v1.types.Execution:
Instance of a general execution.
"""
# Create or coerce a protobuf request object.
# Quick check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([execution, update_mask])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
request = metadata_service.UpdateExecutionRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if execution is not None:
request.execution = execution
if update_mask is not None:
request.update_mask = update_mask
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = gapic_v1.method_async.wrap_method(
self._client._transport.update_execution,
default_timeout=None,
client_info=DEFAULT_CLIENT_INFO,
)
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata(
(("execution.name", request.execution.name),)
),
)
# Send the request.
response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# Done; return the response.
return response
async def delete_execution(
self,
request: Union[metadata_service.DeleteExecutionRequest, dict] = None,
*,
name: str = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> operation_async.AsyncOperation:
r"""Deletes an Execution.
.. code-block:: python
from google.cloud import aiplatform_v1
            async def sample_delete_execution():
                # Create a client
                client = aiplatform_v1.MetadataServiceAsyncClient()
                # Initialize request argument(s)
                request = aiplatform_v1.DeleteExecutionRequest(
                    name="name_value",
                )
                # Make the request
                operation = await client.delete_execution(request=request)
                print("Waiting for operation to complete...")
                response = await operation.result()
                # Handle the response
                print(response)
Args:
request (Union[google.cloud.aiplatform_v1.types.DeleteExecutionRequest, dict]):
The request object. Request message for
[MetadataService.DeleteExecution][google.cloud.aiplatform.v1.MetadataService.DeleteExecution].
name (:class:`str`):
Required. The resource name of the Execution to delete.
Format:
``projects/{project}/locations/{location}/metadataStores/{metadatastore}/executions/{execution}``
This corresponds to the ``name`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.api_core.operation_async.AsyncOperation:
An object representing a long-running operation.
The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty` A generic empty message that you can re-use to avoid defining duplicated
empty messages in your APIs. A typical example is to
use it as the request or the response type of an API
method. For instance:
service Foo {
rpc Bar(google.protobuf.Empty) returns
(google.protobuf.Empty);
}
The JSON representation for Empty is empty JSON
object {}.
"""
# Create or coerce a protobuf request object.
# Quick check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([name])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
request = metadata_service.DeleteExecutionRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if name is not None:
request.name = name
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = gapic_v1.method_async.wrap_method(
self._client._transport.delete_execution,
default_timeout=None,
client_info=DEFAULT_CLIENT_INFO,
)
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
)
# Send the request.
response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# Wrap the response in an operation future.
response = operation_async.from_gapic(
response,
self._client._transport.operations_client,
empty_pb2.Empty,
metadata_type=gca_operation.DeleteOperationMetadata,
)
# Done; return the response.
return response
async def purge_executions(
self,
request: Union[metadata_service.PurgeExecutionsRequest, dict] = None,
*,
parent: str = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> operation_async.AsyncOperation:
r"""Purges Executions.
.. code-block:: python
from google.cloud import aiplatform_v1
def sample_purge_executions():
# Create a client
client = aiplatform_v1.MetadataServiceClient()
# Initialize request argument(s)
request = aiplatform_v1.PurgeExecutionsRequest(
parent="parent_value",
filter="filter_value",
)
# Make the request
operation = client.purge_executions(request=request)
print("Waiting for operation to complete...")
response = operation.result()
# Handle the response
print(response)
Args:
request (Union[google.cloud.aiplatform_v1.types.PurgeExecutionsRequest, dict]):
The request object. Request message for
[MetadataService.PurgeExecutions][google.cloud.aiplatform.v1.MetadataService.PurgeExecutions].
parent (:class:`str`):
Required. The metadata store to purge Executions from.
Format:
``projects/{project}/locations/{location}/metadataStores/{metadatastore}``
This corresponds to the ``parent`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.api_core.operation_async.AsyncOperation:
An object representing a long-running operation.
The result type for the operation will be
:class:`google.cloud.aiplatform_v1.types.PurgeExecutionsResponse`
Response message for
[MetadataService.PurgeExecutions][google.cloud.aiplatform.v1.MetadataService.PurgeExecutions].
"""
# Create or coerce a protobuf request object.
# Quick check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([parent])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
request = metadata_service.PurgeExecutionsRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if parent is not None:
request.parent = parent
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = gapic_v1.method_async.wrap_method(
self._client._transport.purge_executions,
default_timeout=None,
client_info=DEFAULT_CLIENT_INFO,
)
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)),
)
# Send the request.
response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# Wrap the response in an operation future.
response = operation_async.from_gapic(
response,
self._client._transport.operations_client,
metadata_service.PurgeExecutionsResponse,
metadata_type=metadata_service.PurgeExecutionsMetadata,
)
# Done; return the response.
return response
async def add_execution_events(
self,
request: Union[metadata_service.AddExecutionEventsRequest, dict] = None,
*,
execution: str = None,
events: Sequence[event.Event] = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> metadata_service.AddExecutionEventsResponse:
r"""Adds Events to the specified Execution. An Event
indicates whether an Artifact was used as an input or
output for an Execution. If an Event already exists
between the Execution and the Artifact, the Event is
skipped.
.. code-block:: python
from google.cloud import aiplatform_v1
def sample_add_execution_events():
# Create a client
client = aiplatform_v1.MetadataServiceClient()
# Initialize request argument(s)
request = aiplatform_v1.AddExecutionEventsRequest(
execution="execution_value",
)
# Make the request
response = client.add_execution_events(request=request)
# Handle the response
print(response)
Args:
request (Union[google.cloud.aiplatform_v1.types.AddExecutionEventsRequest, dict]):
The request object. Request message for
[MetadataService.AddExecutionEvents][google.cloud.aiplatform.v1.MetadataService.AddExecutionEvents].
execution (:class:`str`):
Required. The resource name of the Execution that the
Events connect Artifacts with. Format:
``projects/{project}/locations/{location}/metadataStores/{metadatastore}/executions/{execution}``
This corresponds to the ``execution`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
events (:class:`Sequence[google.cloud.aiplatform_v1.types.Event]`):
The Events to create and add.
This corresponds to the ``events`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.aiplatform_v1.types.AddExecutionEventsResponse:
Response message for
[MetadataService.AddExecutionEvents][google.cloud.aiplatform.v1.MetadataService.AddExecutionEvents].
"""
# Create or coerce a protobuf request object.
# Quick check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([execution, events])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
request = metadata_service.AddExecutionEventsRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if execution is not None:
request.execution = execution
if events:
request.events.extend(events)
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = gapic_v1.method_async.wrap_method(
self._client._transport.add_execution_events,
default_timeout=None,
client_info=DEFAULT_CLIENT_INFO,
)
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata(
(("execution", request.execution),)
),
)
# Send the request.
response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# Done; return the response.
return response
async def query_execution_inputs_and_outputs(
self,
request: Union[
metadata_service.QueryExecutionInputsAndOutputsRequest, dict
] = None,
*,
execution: str = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> lineage_subgraph.LineageSubgraph:
r"""Obtains the set of input and output Artifacts for
this Execution, in the form of LineageSubgraph that also
contains the Execution and connecting Events.
.. code-block:: python
from google.cloud import aiplatform_v1
def sample_query_execution_inputs_and_outputs():
# Create a client
client = aiplatform_v1.MetadataServiceClient()
# Initialize request argument(s)
request = aiplatform_v1.QueryExecutionInputsAndOutputsRequest(
execution="execution_value",
)
# Make the request
response = client.query_execution_inputs_and_outputs(request=request)
# Handle the response
print(response)
Args:
request (Union[google.cloud.aiplatform_v1.types.QueryExecutionInputsAndOutputsRequest, dict]):
The request object. Request message for
[MetadataService.QueryExecutionInputsAndOutputs][google.cloud.aiplatform.v1.MetadataService.QueryExecutionInputsAndOutputs].
execution (:class:`str`):
Required. The resource name of the Execution whose input
and output Artifacts should be retrieved as a
LineageSubgraph. Format:
``projects/{project}/locations/{location}/metadataStores/{metadatastore}/executions/{execution}``
This corresponds to the ``execution`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.aiplatform_v1.types.LineageSubgraph:
A subgraph of the overall lineage
graph. Event edges connect Artifact and
Execution nodes.
"""
# Create or coerce a protobuf request object.
# Quick check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([execution])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
request = metadata_service.QueryExecutionInputsAndOutputsRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if execution is not None:
request.execution = execution
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = gapic_v1.method_async.wrap_method(
self._client._transport.query_execution_inputs_and_outputs,
default_timeout=None,
client_info=DEFAULT_CLIENT_INFO,
)
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata(
(("execution", request.execution),)
),
)
# Send the request.
response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# Done; return the response.
return response
async def create_metadata_schema(
self,
request: Union[metadata_service.CreateMetadataSchemaRequest, dict] = None,
*,
parent: str = None,
metadata_schema: gca_metadata_schema.MetadataSchema = None,
metadata_schema_id: str = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> gca_metadata_schema.MetadataSchema:
r"""Creates a MetadataSchema.
.. code-block:: python
from google.cloud import aiplatform_v1
def sample_create_metadata_schema():
# Create a client
client = aiplatform_v1.MetadataServiceClient()
# Initialize request argument(s)
metadata_schema = aiplatform_v1.MetadataSchema()
metadata_schema.schema = "schema_value"
request = aiplatform_v1.CreateMetadataSchemaRequest(
parent="parent_value",
metadata_schema=metadata_schema,
)
# Make the request
response = client.create_metadata_schema(request=request)
# Handle the response
print(response)
Args:
request (Union[google.cloud.aiplatform_v1.types.CreateMetadataSchemaRequest, dict]):
The request object. Request message for
[MetadataService.CreateMetadataSchema][google.cloud.aiplatform.v1.MetadataService.CreateMetadataSchema].
parent (:class:`str`):
Required. The resource name of the MetadataStore where
the MetadataSchema should be created. Format:
``projects/{project}/locations/{location}/metadataStores/{metadatastore}``
This corresponds to the ``parent`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
metadata_schema (:class:`google.cloud.aiplatform_v1.types.MetadataSchema`):
Required. The MetadataSchema to
create.
This corresponds to the ``metadata_schema`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
metadata_schema_id (:class:`str`):
The {metadata_schema} portion of the resource name with
the format:
``projects/{project}/locations/{location}/metadataStores/{metadatastore}/metadataSchemas/{metadataschema}``
                If not provided, the MetadataSchema's ID will be a UUID
generated by the service. Must be 4-128 characters in
length. Valid characters are ``/[a-z][0-9]-/``. Must be
unique across all MetadataSchemas in the parent
Location. (Otherwise the request will fail with
ALREADY_EXISTS, or PERMISSION_DENIED if the caller can't
view the preexisting MetadataSchema.)
This corresponds to the ``metadata_schema_id`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.aiplatform_v1.types.MetadataSchema:
Instance of a general MetadataSchema.
"""
# Create or coerce a protobuf request object.
# Quick check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([parent, metadata_schema, metadata_schema_id])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
request = metadata_service.CreateMetadataSchemaRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if parent is not None:
request.parent = parent
if metadata_schema is not None:
request.metadata_schema = metadata_schema
if metadata_schema_id is not None:
request.metadata_schema_id = metadata_schema_id
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = gapic_v1.method_async.wrap_method(
self._client._transport.create_metadata_schema,
default_timeout=None,
client_info=DEFAULT_CLIENT_INFO,
)
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)),
)
# Send the request.
response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# Done; return the response.
return response
async def get_metadata_schema(
self,
request: Union[metadata_service.GetMetadataSchemaRequest, dict] = None,
*,
name: str = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> metadata_schema.MetadataSchema:
r"""Retrieves a specific MetadataSchema.
.. code-block:: python
from google.cloud import aiplatform_v1
def sample_get_metadata_schema():
# Create a client
client = aiplatform_v1.MetadataServiceClient()
# Initialize request argument(s)
request = aiplatform_v1.GetMetadataSchemaRequest(
name="name_value",
)
# Make the request
response = client.get_metadata_schema(request=request)
# Handle the response
print(response)
Args:
request (Union[google.cloud.aiplatform_v1.types.GetMetadataSchemaRequest, dict]):
The request object. Request message for
[MetadataService.GetMetadataSchema][google.cloud.aiplatform.v1.MetadataService.GetMetadataSchema].
name (:class:`str`):
Required. The resource name of the MetadataSchema to
retrieve. Format:
``projects/{project}/locations/{location}/metadataStores/{metadatastore}/metadataSchemas/{metadataschema}``
This corresponds to the ``name`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.aiplatform_v1.types.MetadataSchema:
Instance of a general MetadataSchema.
"""
# Create or coerce a protobuf request object.
# Quick check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([name])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
request = metadata_service.GetMetadataSchemaRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if name is not None:
request.name = name
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = gapic_v1.method_async.wrap_method(
self._client._transport.get_metadata_schema,
default_timeout=None,
client_info=DEFAULT_CLIENT_INFO,
)
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
)
# Send the request.
response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# Done; return the response.
return response
async def list_metadata_schemas(
self,
request: Union[metadata_service.ListMetadataSchemasRequest, dict] = None,
*,
parent: str = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> pagers.ListMetadataSchemasAsyncPager:
r"""Lists MetadataSchemas.
.. code-block:: python
from google.cloud import aiplatform_v1
def sample_list_metadata_schemas():
# Create a client
client = aiplatform_v1.MetadataServiceClient()
# Initialize request argument(s)
request = aiplatform_v1.ListMetadataSchemasRequest(
parent="parent_value",
)
# Make the request
page_result = client.list_metadata_schemas(request=request)
# Handle the response
for response in page_result:
print(response)
Args:
request (Union[google.cloud.aiplatform_v1.types.ListMetadataSchemasRequest, dict]):
The request object. Request message for
[MetadataService.ListMetadataSchemas][google.cloud.aiplatform.v1.MetadataService.ListMetadataSchemas].
parent (:class:`str`):
Required. The MetadataStore whose MetadataSchemas should
be listed. Format:
``projects/{project}/locations/{location}/metadataStores/{metadatastore}``
This corresponds to the ``parent`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.aiplatform_v1.services.metadata_service.pagers.ListMetadataSchemasAsyncPager:
Response message for
[MetadataService.ListMetadataSchemas][google.cloud.aiplatform.v1.MetadataService.ListMetadataSchemas].
Iterating over this object will yield results and
resolve additional pages automatically.
"""
# Create or coerce a protobuf request object.
# Quick check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([parent])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
request = metadata_service.ListMetadataSchemasRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if parent is not None:
request.parent = parent
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = gapic_v1.method_async.wrap_method(
self._client._transport.list_metadata_schemas,
default_timeout=None,
client_info=DEFAULT_CLIENT_INFO,
)
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)),
)
# Send the request.
response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# This method is paged; wrap the response in a pager, which provides
# an `__aiter__` convenience method.
response = pagers.ListMetadataSchemasAsyncPager(
method=rpc, request=request, response=response, metadata=metadata,
)
# Done; return the response.
return response
async def query_artifact_lineage_subgraph(
self,
request: Union[
metadata_service.QueryArtifactLineageSubgraphRequest, dict
] = None,
*,
artifact: str = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> lineage_subgraph.LineageSubgraph:
r"""Retrieves lineage of an Artifact represented through
Artifacts and Executions connected by Event edges and
returned as a LineageSubgraph.
.. code-block:: python
from google.cloud import aiplatform_v1
def sample_query_artifact_lineage_subgraph():
# Create a client
client = aiplatform_v1.MetadataServiceClient()
# Initialize request argument(s)
request = aiplatform_v1.QueryArtifactLineageSubgraphRequest(
artifact="artifact_value",
)
# Make the request
response = client.query_artifact_lineage_subgraph(request=request)
# Handle the response
print(response)
Args:
request (Union[google.cloud.aiplatform_v1.types.QueryArtifactLineageSubgraphRequest, dict]):
The request object. Request message for
[MetadataService.QueryArtifactLineageSubgraph][google.cloud.aiplatform.v1.MetadataService.QueryArtifactLineageSubgraph].
artifact (:class:`str`):
Required. The resource name of the Artifact whose
Lineage needs to be retrieved as a LineageSubgraph.
Format:
``projects/{project}/locations/{location}/metadataStores/{metadatastore}/artifacts/{artifact}``
The request may error with FAILED_PRECONDITION if the
number of Artifacts, the number of Executions, or the
number of Events that would be returned for the Context
exceeds 1000.
This corresponds to the ``artifact`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.aiplatform_v1.types.LineageSubgraph:
A subgraph of the overall lineage
graph. Event edges connect Artifact and
Execution nodes.
"""
# Create or coerce a protobuf request object.
# Quick check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([artifact])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
request = metadata_service.QueryArtifactLineageSubgraphRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if artifact is not None:
request.artifact = artifact
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = gapic_v1.method_async.wrap_method(
self._client._transport.query_artifact_lineage_subgraph,
default_timeout=None,
client_info=DEFAULT_CLIENT_INFO,
)
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("artifact", request.artifact),)),
)
# Send the request.
response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# Done; return the response.
return response
async def __aenter__(self):
return self
async def __aexit__(self, exc_type, exc, tb):
await self.transport.close()
try:
DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(
gapic_version=pkg_resources.get_distribution(
"google-cloud-aiplatform",
).version,
)
except pkg_resources.DistributionNotFound:
DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo()
__all__ = ("MetadataServiceAsyncClient",)
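# --- Hedged usage sketch (not part of the generated client above) -----------
# The docstring samples in this module show the synchronous
# MetadataServiceClient for brevity; the helper below sketches the equivalent
# asynchronous flow with an awaited call. The resource name is a placeholder,
# and the export of MetadataServiceAsyncClient from aiplatform_v1 is assumed
# rather than shown in this file.
def _example_async_usage():  # pragma: no cover - illustrative only
    import asyncio
    from google.cloud import aiplatform_v1
    async def show_schema():
        client = aiplatform_v1.MetadataServiceAsyncClient()
        request = aiplatform_v1.GetMetadataSchemaRequest(
            name=(
                "projects/my-project/locations/us-central1/"
                "metadataStores/default/metadataSchemas/my-schema"
            )
        )
        # Accepts the same retry/timeout/metadata arguments shown in the
        # method signatures above; defaults are used here.
        schema = await client.get_metadata_schema(request=request)
        print(schema)
    asyncio.run(show_schema())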
| googleapis/python-aiplatform | google/cloud/aiplatform_v1/services/metadata_service/async_client.py | Python | apache-2.0 | 145,760 |
#!/usr/bin/env python
# Copyright 2019 The PySCF Developers. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import copy
import tempfile
from pyscf import lib, gto, scf
from pyscf.tools import cubegen
mol = gto.Mole()
mol.atom = '''
O 0.00000000, 0.000000, 0.119748
H 0.00000000, 0.761561, -0.478993
H 0.00000000, -0.761561, -0.478993 '''
mol.verbose = 0
mol.build()
mf = scf.RHF(mol).run()
def tearDownModule():
global mol, mf
del mol, mf
class KnownValues(unittest.TestCase):
def test_mep(self):
ftmp = tempfile.NamedTemporaryFile()
mep = cubegen.mep(mol, ftmp.name, mf.make_rdm1(),
nx=10, ny=10, nz=10)
self.assertEqual(mep.shape, (10,10,10))
self.assertAlmostEqual(lib.finger(mep), -0.3198103636180436, 9)
mep = cubegen.mep(mol, ftmp.name, mf.make_rdm1(),
nx=10, ny=10, nz=10, resolution=0.5)
self.assertEqual(mep.shape, (12,18,15))
self.assertAlmostEqual(lib.finger(mep), -4.653995909548524, 9)
def test_orb(self):
ftmp = tempfile.NamedTemporaryFile()
orb = cubegen.orbital(mol, ftmp.name, mf.mo_coeff[:,0],
nx=10, ny=10, nz=10)
self.assertEqual(orb.shape, (10,10,10))
self.assertAlmostEqual(lib.finger(orb), -0.11804191128016768, 9)
orb = cubegen.orbital(mol, ftmp.name, mf.mo_coeff[:,0],
nx=10, ny=10, nz=10, resolution=0.5)
self.assertEqual(orb.shape, (12,18,15))
self.assertAlmostEqual(lib.finger(orb), -0.8591778390706646, 9)
orb = cubegen.orbital(mol, ftmp.name, mf.mo_coeff[:,0],
nx=10, ny=1, nz=1)
self.assertEqual(orb.shape, (10,1,1))
self.assertAlmostEqual(lib.finger(orb), 6.921008881822988e-09, 9)
def test_rho(self):
ftmp = tempfile.NamedTemporaryFile()
rho = cubegen.density(mol, ftmp.name, mf.make_rdm1(),
nx=10, ny=10, nz=10)
self.assertEqual(rho.shape, (10,10,10))
self.assertAlmostEqual(lib.finger(rho), -0.3740462814001553, 9)
rho = cubegen.density(mol, ftmp.name, mf.make_rdm1(),
nx=10, ny=10, nz=10, resolution=0.5)
self.assertEqual(rho.shape, (12,18,15))
self.assertAlmostEqual(lib.finger(rho), -1.007950007160415, 9)
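# --- Hedged usage sketch (not part of the tests above) ----------------------
# The tests above check cubegen output through lib.finger checksums; the helper
# below sketches the plain workflow they imply: build a molecule, run RHF, and
# write the electron density to a cube file. The basis set and filename are
# arbitrary choices, not values taken from the tests.
def _example_density_cube(path='water_rho.cube'):
    h2o = gto.M(
        atom='O 0 0 0.119748; H 0 0.761561 -0.478993; H 0 -0.761561 -0.478993',
        basis='sto-3g',
        verbose=0,
    )
    mf_h2o = scf.RHF(h2o).run()
    # Same call signature as in test_rho; the grid is kept coarse so the
    # example runs quickly.
    return cubegen.density(h2o, path, mf_h2o.make_rdm1(), nx=20, ny=20, nz=20)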
if __name__ == "__main__":
print("Full Tests for molden")
unittest.main()
| sunqm/pyscf | pyscf/tools/test/test_cubegen.py | Python | apache-2.0 | 2,994 |
# Copyright 2014 Google Inc. All Rights Reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests in OpenHTF.
Tests are the main entry point for OpenHTF tests. In its simplest form a
test is a series of Phases that are executed by the OpenHTF framework.
"""
import argparse
import collections
import logging
import os
import sys
import textwrap
import threading
from types import LambdaType
import uuid
import weakref
import colorama
import mutablerecords
from openhtf import util
from openhtf.core import phase_descriptor
from openhtf.core import phase_executor
from openhtf.core import phase_group
from openhtf.core import test_executor
from openhtf.core import test_record
from openhtf.util import conf
from openhtf.util import console_output
from openhtf.util import logs
import six
_LOG = logging.getLogger(__name__)
conf.declare('capture_source', description=textwrap.dedent(
'''Whether to capture the source of phases and the test module. This
defaults to False since this potentially reads many files and makes large
string copies.
Set to 'true' if you want to capture your test's source.'''),
default_value=False)
# TODO(arsharma): Deprecate this configuration after removing the old teardown
# specification.
conf.declare('teardown_timeout_s', default_value=30, description=
'Default timeout (in seconds) for test teardown functions; '
'this option is deprecated and only applies to the deprecated '
'Test level teardown function.')
class UnrecognizedTestUidError(Exception):
"""Raised when information is requested about an unknown Test UID."""
class InvalidTestPhaseError(Exception):
"""Raised when an invalid method is decorated."""
class InvalidTestStateError(Exception):
"""Raised when an operation is attempted in an invalid state."""
def create_arg_parser(add_help=False):
"""Creates an argparse.ArgumentParser for parsing command line flags.
If you want to add arguments, create your own with this as a parent:
>>> parser = argparse.ArgumentParser(
'My args title', parents=[openhtf.create_arg_parser()])
>>> parser.parse_args()
Args:
add_help: boolean option passed through to arg parser.
Returns:
an `argparse.ArgumentParser`
"""
parser = argparse.ArgumentParser(
'OpenHTF-based testing',
parents=[
conf.ARG_PARSER,
console_output.ARG_PARSER,
logs.ARG_PARSER,
phase_executor.ARG_PARSER,
],
add_help=add_help)
parser.add_argument(
'--config-help', action='store_true',
help='Instead of executing the test, simply print all available config '
'keys and their description strings.')
return parser
class Test(object):
"""An object that represents an OpenHTF test.
Example:
def PhaseOne(test):
# Integrate more widgets
def PhaseTwo(test):
# Analyze widget integration status
Test(PhaseOne, PhaseTwo).execute()
Note that Test() objects *must* be created in the main thread, but can be
.execute()'d in a separate thread.
"""
TEST_INSTANCES = weakref.WeakValueDictionary()
HANDLED_SIGINT_ONCE = False
def __init__(self, *phases, **metadata):
# Some sanity checks on special metadata keys we automatically fill in.
if 'config' in metadata:
raise KeyError(
'Invalid metadata key "config", it will be automatically populated.')
self.created_time_millis = util.time_millis()
self.last_run_time_millis = None
self._test_options = TestOptions()
self._lock = threading.Lock()
self._executor = None
self._test_desc = TestDescriptor(
phases, test_record.CodeInfo.uncaptured(), metadata)
if conf.capture_source:
# First, we copy the phases with the real CodeInfo for them.
group = self._test_desc.phase_group.load_code_info()
# Then we replace the TestDescriptor with one that stores the test
# module's CodeInfo as well as our newly copied phases.
code_info = test_record.CodeInfo.for_module_from_stack(levels_up=2)
self._test_desc = self._test_desc._replace(
code_info=code_info, phase_group=group)
# Make sure configure() gets called at least once before Execute(). The
# user might call configure() again to override options, but we don't want
# to force them to if they want to use defaults. For default values, see
# the class definition of TestOptions.
if 'test_name' in metadata:
# Allow legacy metadata key for specifying test name.
self.configure(name=metadata['test_name'])
else:
self.configure()
@classmethod
def from_uid(cls, test_uid):
"""Get Test by UID.
Args:
test_uid: uuid for desired test.
Returns:
Test object for given by UID.
Raises:
UnrecognizedTestUidError: If the test_uid is not recognized.
"""
test = cls.TEST_INSTANCES.get(test_uid)
if not test:
raise UnrecognizedTestUidError('Test UID %s not recognized' % test_uid)
return test
@property
def uid(self):
if self._executor is not None:
return self._executor.uid
def make_uid(self):
"""Returns the next test execution's UID.
This identifier must be unique but trackable across invocations of
execute(). Therefore, it's made of four parts separated by ':'
* Process-specific (decided on process start up)
* Test descriptor-specific (decided on descriptor creation)
    * Execution-specific (a random token decided on test start)
    * Start-time-specific (the start timestamp in milliseconds)
"""
return '%s:%s:%s:%s' % (os.getpid(), self.descriptor.uid,
uuid.uuid4().hex[:16], util.time_millis())
@property
def descriptor(self):
"""Static data about this test, does not change across Execute() calls."""
return self._test_desc
@property
def state(self):
"""Transient state info about the currently executing test, or None."""
with self._lock:
if self._executor:
return self._executor.test_state
def get_option(self, option):
return getattr(self._test_options, option)
def add_output_callbacks(self, *callbacks):
"""Add the given function as an output module to this test."""
self._test_options.output_callbacks.extend(callbacks)
def configure(self, **kwargs):
"""Update test-wide configuration options. See TestOptions for docs."""
# These internally ensure they are safe to call multiple times with no weird
# side effects.
known_args, _ = create_arg_parser(add_help=True).parse_known_args()
if known_args.config_help:
sys.stdout.write(conf.help_text)
sys.exit(0)
logs.configure_logging()
for key, value in six.iteritems(kwargs):
setattr(self._test_options, key, value)
@classmethod
def handle_sig_int(cls, *_):
if cls.TEST_INSTANCES:
_LOG.error('Received SIGINT, stopping all tests.')
for test in cls.TEST_INSTANCES.values():
test.abort_from_sig_int()
if not cls.HANDLED_SIGINT_ONCE:
cls.HANDLED_SIGINT_ONCE = True
# Emilio 2018-09-21: Raising this KeyboardInterrupt caused a traceback to be shown on-screen after posting the
# test to the database. There's no point.
# raise KeyboardInterrupt
# Otherwise, does not raise KeyboardInterrupt to ensure that the tests are
# cleaned up.
def abort_from_sig_int(self):
"""Abort test execution abruptly, only in response to SIGINT."""
with self._lock:
_LOG.error('Aborting %s due to SIGINT', self)
if self._executor:
# TestState str()'s nicely to a descriptive string, so let's log that
# just for good measure.
_LOG.error('Test state: %s', self._executor.test_state)
self._executor.abort()
# TODO(arsharma): teardown_function test option is deprecated; remove this.
def _get_running_test_descriptor(self):
"""If there is a teardown_function, wrap current descriptor with it."""
if not self._test_options.teardown_function:
return self._test_desc
teardown_phase = phase_descriptor.PhaseDescriptor.wrap_or_copy(
self._test_options.teardown_function)
if not teardown_phase.options.timeout_s:
teardown_phase.options.timeout_s = conf.teardown_timeout_s
return TestDescriptor(
phase_group.PhaseGroup(main=[self._test_desc.phase_group],
teardown=[teardown_phase]),
self._test_desc.code_info, self._test_desc.metadata)
def execute(self, test_start=None):
"""Starts the framework and executes the given test.
Args:
test_start: Either a trigger phase for starting the test, or a function
that returns a DUT ID. If neither is provided, defaults to not
setting the DUT ID.
Returns:
Boolean indicating whether the test failed (False) or passed (True).
Raises:
InvalidTestStateError: if this test is already being executed.
"""
# Lock this section so we don't .stop() the executor between instantiating
# it and .Start()'ing it, doing so does weird things to the executor state.
with self._lock:
# Sanity check to make sure someone isn't doing something weird like
# trying to Execute() the same test twice in two separate threads. We
# hold the lock between here and Start()'ing the executor to guarantee
# that only one thread is successfully executing the test.
if self._executor:
raise InvalidTestStateError('Test already running', self._executor)
# Snapshot some things we care about and store them.
self._test_desc.metadata['test_name'] = self._test_options.name
self._test_desc.metadata['config'] = conf._asdict()
self.last_run_time_millis = util.time_millis()
if isinstance(test_start, LambdaType):
@phase_descriptor.PhaseOptions()
def trigger_phase(test):
test.test_record.dut_id = test_start()
trigger = trigger_phase
else:
trigger = test_start
if conf.capture_source:
trigger.code_info = test_record.CodeInfo.for_function(trigger.func)
test_desc = self._get_running_test_descriptor()
self._executor = test_executor.TestExecutor(
test_desc, self.make_uid(), trigger, self._test_options)
_LOG.info('Executing test: %s', self.descriptor.code_info.name)
self.TEST_INSTANCES[self.uid] = self
self._test_desc.metadata['openhtf_uid'] = self.uid
_LOG.debug('OpenHTF test instance uid "%s" recorded in metadata["openhtf_uid"]' % self.uid)
self._executor.start()
try:
self._executor.wait()
except KeyboardInterrupt:
# The SIGINT handler only raises the KeyboardInterrupt once, so only retry
# that once.
self._executor.wait()
raise
finally:
try:
final_state = self._executor.finalize()
_LOG.debug('Test completed for %s, outputting now.',
final_state.test_record.metadata['test_name'])
for output_cb in self._test_options.output_callbacks:
try:
output_cb(final_state.test_record)
except Exception: # pylint: disable=broad-except
_LOG.exception(
'Output callback %s raised; continuing anyway', output_cb)
# Make sure the final outcome of the test is printed last and in a
# noticeable color so it doesn't get scrolled off the screen or missed.
if final_state.test_record.outcome == test_record.Outcome.ERROR:
for detail in final_state.test_record.outcome_details:
console_output.error_print(detail.description)
else:
colors = collections.defaultdict(lambda: colorama.Style.BRIGHT)
colors[test_record.Outcome.PASS] = ''.join((colorama.Style.BRIGHT,
colorama.Fore.GREEN))
colors[test_record.Outcome.FAIL] = ''.join((colorama.Style.BRIGHT,
colorama.Fore.RED))
msg_template = 'test: {name} outcome: {color}{outcome}{rst}'
console_output.banner_print(msg_template.format(
name=final_state.test_record.metadata['test_name'],
color=colors[final_state.test_record.outcome],
outcome=final_state.test_record.outcome.name,
rst=colorama.Style.RESET_ALL))
finally:
del self.TEST_INSTANCES[self.uid]
self._executor = None
return final_state.test_record.outcome == test_record.Outcome.PASS
# TODO(arsharma): Deprecate the teardown_function in favor of PhaseGroups.
class TestOptions(mutablerecords.Record('TestOptions', [], {
'name': 'openhtf_test',
'output_callbacks': list,
'teardown_function': None,
'failure_exceptions': list,
'default_dut_id': 'UNKNOWN_DUT',
})):
"""Class encapsulating various tunable knobs for Tests and their defaults.
name: The name of the test to be put into the metadata.
output_callbacks: List of output callbacks to run, typically it's better to
use add_output_callbacks(), but you can pass [] here to reset them.
teardown_function: Function to run at teardown. We pass the same arguments to
it as a phase.
failure_exceptions: Exceptions to cause a test FAIL instead of ERROR. When a
test run exits early due to an exception, the run will be marked as a FAIL
if the raised exception matches one of the types in this list. Otherwise,
the run is marked as ERROR.
default_dut_id: The DUT ID that will be used if the start trigger and all
subsequent phases fail to set one.
"""
class TestDescriptor(collections.namedtuple(
'TestDescriptor', ['phase_group', 'code_info', 'metadata', 'uid'])):
"""An object that represents the reusable portions of an OpenHTF test.
This object encapsulates the static test information that is set once and used
by the framework along the way.
Attributes:
phase_group: The top level phase group to execute for this Test.
metadata: Any metadata that should be associated with test records.
code_info: Information about the module that created the Test.
uid: UID for this test.
"""
def __new__(cls, phases, code_info, metadata):
group = phase_group.PhaseGroup.convert_if_not(phases)
return super(TestDescriptor, cls).__new__(
cls, group, code_info, metadata, uid=uuid.uuid4().hex[:16])
@property
def plug_types(self):
"""Returns set of plug types required by this test."""
return {plug.cls
for phase in self.phase_group
for plug in phase.plugs}
class TestApi(collections.namedtuple('TestApi', [
'logger', 'state', 'test_record', 'measurements', 'attachments',
'attach', 'attach_from_file', 'get_measurement', 'get_attachment',
'notify_update'])):
"""Class passed to test phases as the first argument.
Attributes:
dut_id: This attribute provides getter and setter access to the DUT ID
of the device under test by the currently running openhtf.Test. A
non-empty DUT ID *must* be set by the end of a test, or no output
will be produced. It may be set via return value from a callable
test_start argument to openhtf.Test.Execute(), or may be set in a
test phase via this attribute.
logger: A Python Logger instance that can be used to log to the resulting
TestRecord. This object supports all the usual log levels, and
outputs to stdout (configurable) and the frontend via the Station
API, if it's enabled, in addition to the 'log_records' attribute
of the final TestRecord output by the running test.
measurements: A measurements.Collection object used to get/set
measurement values. See util/measurements.py for more implementation
details, but in the simple case, set measurements directly as
attributes on this object (see examples/measurements.py for examples).
state: A dict (initially empty) that is persisted across test phases (but
resets for every invocation of Execute() on an openhtf.Test). This
can be used for any test-wide state you need to persist across phases.
Use this with caution, however, as it is not persisted in the output
TestRecord or displayed on the web frontend in any way.
test_record: A reference to the output TestRecord for the currently
running openhtf.Test. Direct access to this attribute is *strongly*
discouraged, but provided as a catch-all for interfaces not otherwise
provided by TestApi. If you find yourself using this, please file a
feature request for an alternative at:
https://github.com/google/openhtf/issues/new
Callable Attributes:
attach: Attach binary data to the test, see TestState.attach().
attach_from_file: Attach binary data from a file, see
TestState.attach_from_file().
get_attachment: Get copy of attachment contents from current or previous
      phase, see TestState.get_attachment().
get_measurement: Get copy of a measurement from a current or previous phase,
see TestState.get_measurement().
notify_update: Notify any frontends of an interesting update. Typically
this is automatically called internally when interesting things happen,
but it can be called by the user (takes no args), for instance if
modifying test_record directly.
Read-only Attributes:
attachments: Dict mapping attachment name to test_record.Attachment
instance containing the data that was attached (and the MIME type
that was assumed based on extension, if any). Only attachments
that have been attached in the current phase show up here, and this
attribute should not be modified directly; use TestApi.attach() or
TestApi.attach_from_file() instead.
"""
@property
def dut_id(self):
return self.test_record.dut_id
@dut_id.setter
def dut_id(self, dut_id):
if self.test_record.dut_id:
self.logger.warning('Overriding previous DUT ID "%s" with "%s".',
self.test_record.dut_id, dut_id)
self.test_record.dut_id = dut_id
self.notify_update()
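# --- Hedged usage sketch (not part of the module above) ---------------------
# The Test docstring above outlines the intended flow; this helper fleshes it
# out using only what this module documents: phases are callables that receive
# the TestApi object, execute() may take a callable test_start returning a DUT
# ID, and output callbacks receive the final TestRecord. The phase bodies and
# DUT ID are made up for illustration.
def _example_run():  # pragma: no cover - illustrative only
  def phase_one(test):
    test.logger.info('Integrating widgets for %s', test.dut_id)
  def phase_two(test):
    # Anything stored in test.state persists across phases of this run.
    test.state['widgets_integrated'] = True
  test = Test(phase_one, phase_two)
  test.configure(name='widget_integration')
  test.add_output_callbacks(lambda record: print(record.outcome))
  # Returns True when the final outcome is PASS.
  return test.execute(test_start=lambda: 'dut-0001')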
| ShaperTools/openhtf | openhtf/core/test_descriptor.py | Python | apache-2.0 | 18,718 |
"""Component for interacting with a Lutron Caseta system."""
from __future__ import annotations
import asyncio
import contextlib
import logging
import ssl
import async_timeout
from pylutron_caseta import BUTTON_STATUS_PRESSED
from pylutron_caseta.smartbridge import Smartbridge
import voluptuous as vol
from homeassistant import config_entries
from homeassistant.const import CONF_HOST, Platform
from homeassistant.core import HomeAssistant, callback
from homeassistant.exceptions import ConfigEntryNotReady
from homeassistant.helpers import device_registry as dr
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.entity import DeviceInfo, Entity
from homeassistant.helpers.typing import ConfigType
from .const import (
ACTION_PRESS,
ACTION_RELEASE,
ATTR_ACTION,
ATTR_AREA_NAME,
ATTR_BUTTON_NUMBER,
ATTR_DEVICE_NAME,
ATTR_LEAP_BUTTON_NUMBER,
ATTR_SERIAL,
ATTR_TYPE,
BRIDGE_DEVICE,
BRIDGE_DEVICE_ID,
BRIDGE_LEAP,
BRIDGE_TIMEOUT,
BUTTON_DEVICES,
CONF_CA_CERTS,
CONF_CERTFILE,
CONF_KEYFILE,
DOMAIN,
LUTRON_CASETA_BUTTON_EVENT,
MANUFACTURER,
)
from .device_trigger import (
DEVICE_TYPE_SUBTYPE_MAP_TO_LIP,
LEAP_TO_DEVICE_TYPE_SUBTYPE_MAP,
)
_LOGGER = logging.getLogger(__name__)
DATA_BRIDGE_CONFIG = "lutron_caseta_bridges"
CONFIG_SCHEMA = vol.Schema(
{
DOMAIN: vol.All(
cv.ensure_list,
[
{
vol.Required(CONF_HOST): cv.string,
vol.Required(CONF_KEYFILE): cv.string,
vol.Required(CONF_CERTFILE): cv.string,
vol.Required(CONF_CA_CERTS): cv.string,
}
],
)
},
extra=vol.ALLOW_EXTRA,
)
PLATFORMS = [
Platform.BINARY_SENSOR,
Platform.COVER,
Platform.FAN,
Platform.LIGHT,
Platform.SCENE,
Platform.SWITCH,
]
async def async_setup(hass: HomeAssistant, base_config: ConfigType) -> bool:
"""Set up the Lutron component."""
hass.data.setdefault(DOMAIN, {})
if DOMAIN in base_config:
bridge_configs = base_config[DOMAIN]
for config in bridge_configs:
hass.async_create_task(
hass.config_entries.flow.async_init(
DOMAIN,
context={"source": config_entries.SOURCE_IMPORT},
# extract the config keys one-by-one just to be explicit
data={
CONF_HOST: config[CONF_HOST],
CONF_KEYFILE: config[CONF_KEYFILE],
CONF_CERTFILE: config[CONF_CERTFILE],
CONF_CA_CERTS: config[CONF_CA_CERTS],
},
)
)
return True
async def async_setup_entry(
hass: HomeAssistant, config_entry: config_entries.ConfigEntry
) -> bool:
"""Set up a bridge from a config entry."""
entry_id = config_entry.entry_id
host = config_entry.data[CONF_HOST]
keyfile = hass.config.path(config_entry.data[CONF_KEYFILE])
certfile = hass.config.path(config_entry.data[CONF_CERTFILE])
ca_certs = hass.config.path(config_entry.data[CONF_CA_CERTS])
bridge = None
try:
bridge = Smartbridge.create_tls(
hostname=host, keyfile=keyfile, certfile=certfile, ca_certs=ca_certs
)
except ssl.SSLError:
_LOGGER.error("Invalid certificate used to connect to bridge at %s", host)
return False
timed_out = True
with contextlib.suppress(asyncio.TimeoutError):
async with async_timeout.timeout(BRIDGE_TIMEOUT):
await bridge.connect()
timed_out = False
if timed_out or not bridge.is_connected():
await bridge.close()
if timed_out:
raise ConfigEntryNotReady(f"Timed out while trying to connect to {host}")
if not bridge.is_connected():
raise ConfigEntryNotReady(f"Cannot connect to {host}")
_LOGGER.debug("Connected to Lutron Caseta bridge via LEAP at %s", host)
devices = bridge.get_devices()
bridge_device = devices[BRIDGE_DEVICE_ID]
buttons = bridge.buttons
_async_register_bridge_device(hass, entry_id, bridge_device)
button_devices = _async_register_button_devices(
hass, entry_id, bridge_device, buttons
)
_async_subscribe_pico_remote_events(hass, bridge, buttons)
# Store this bridge (keyed by entry_id) so it can be retrieved by the
# platforms we're setting up.
hass.data[DOMAIN][entry_id] = {
BRIDGE_LEAP: bridge,
BRIDGE_DEVICE: bridge_device,
BUTTON_DEVICES: button_devices,
}
hass.config_entries.async_setup_platforms(config_entry, PLATFORMS)
return True
@callback
def _async_register_bridge_device(
hass: HomeAssistant, config_entry_id: str, bridge_device: dict
) -> None:
"""Register the bridge device in the device registry."""
device_registry = dr.async_get(hass)
device_registry.async_get_or_create(
name=bridge_device["name"],
manufacturer=MANUFACTURER,
config_entry_id=config_entry_id,
identifiers={(DOMAIN, bridge_device["serial"])},
model=f"{bridge_device['model']} ({bridge_device['type']})",
configuration_url="https://device-login.lutron.com",
)
@callback
def _async_register_button_devices(
hass: HomeAssistant,
config_entry_id: str,
bridge_device,
button_devices_by_id: dict[int, dict],
) -> dict[str, dr.DeviceEntry]:
"""Register button devices (Pico Remotes) in the device registry."""
device_registry = dr.async_get(hass)
button_devices_by_dr_id = {}
seen = set()
for device in button_devices_by_id.values():
if "serial" not in device or device["serial"] in seen:
continue
seen.add(device["serial"])
dr_device = device_registry.async_get_or_create(
name=device["name"],
suggested_area=device["name"].split("_")[0],
manufacturer=MANUFACTURER,
config_entry_id=config_entry_id,
identifiers={(DOMAIN, device["serial"])},
model=f"{device['model']} ({device['type']})",
via_device=(DOMAIN, bridge_device["serial"]),
)
button_devices_by_dr_id[dr_device.id] = device
return button_devices_by_dr_id
@callback
def _async_subscribe_pico_remote_events(
hass: HomeAssistant,
bridge_device: Smartbridge,
button_devices_by_id: dict[int, dict],
):
"""Subscribe to lutron events."""
@callback
def _async_button_event(button_id, event_type):
device = button_devices_by_id.get(button_id)
if not device:
return
if event_type == BUTTON_STATUS_PRESSED:
action = ACTION_PRESS
else:
action = ACTION_RELEASE
type_ = device["type"]
name = device["name"]
button_number = device["button_number"]
# The original implementation used LIP instead of LEAP
# so we need to convert the button number to maintain compat
sub_type_to_lip_button = DEVICE_TYPE_SUBTYPE_MAP_TO_LIP[type_]
leap_button_to_sub_type = LEAP_TO_DEVICE_TYPE_SUBTYPE_MAP[type_]
if (sub_type := leap_button_to_sub_type.get(button_number)) is None:
_LOGGER.error(
"Unknown LEAP button number %s is not in %s for %s (%s)",
button_number,
leap_button_to_sub_type,
name,
type_,
)
return
lip_button_number = sub_type_to_lip_button[sub_type]
hass.bus.async_fire(
LUTRON_CASETA_BUTTON_EVENT,
{
ATTR_SERIAL: device["serial"],
ATTR_TYPE: type_,
ATTR_BUTTON_NUMBER: lip_button_number,
ATTR_LEAP_BUTTON_NUMBER: button_number,
ATTR_DEVICE_NAME: name,
ATTR_AREA_NAME: name.split("_")[0],
ATTR_ACTION: action,
},
)
for button_id in button_devices_by_id:
bridge_device.add_button_subscriber(
str(button_id),
lambda event_type, button_id=button_id: _async_button_event(
button_id, event_type
),
)
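# --- Hedged sketch (not part of the component above) -------------------------
# Consumers of the event fired by _async_button_event subscribe through the
# normal event bus; the handler below just logs the documented payload keys.
# The log format is illustrative, not something this integration defines.
def _example_subscribe_button_events(hass: HomeAssistant) -> None:
    @callback
    def _handle_button_event(event) -> None:
        data = event.data
        _LOGGER.debug(
            "%s button %s on %s (area %s)",
            data[ATTR_ACTION],
            data[ATTR_LEAP_BUTTON_NUMBER],
            data[ATTR_DEVICE_NAME],
            data[ATTR_AREA_NAME],
        )
    hass.bus.async_listen(LUTRON_CASETA_BUTTON_EVENT, _handle_button_event)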
async def async_unload_entry(
hass: HomeAssistant, entry: config_entries.ConfigEntry
) -> bool:
"""Unload the bridge bridge from a config entry."""
data = hass.data[DOMAIN][entry.entry_id]
smartbridge: Smartbridge = data[BRIDGE_LEAP]
await smartbridge.close()
if unload_ok := await hass.config_entries.async_unload_platforms(entry, PLATFORMS):
hass.data[DOMAIN].pop(entry.entry_id)
return unload_ok
class LutronCasetaDevice(Entity):
"""Common base class for all Lutron Caseta devices."""
def __init__(self, device, bridge, bridge_device):
"""Set up the base class.
        :param device: the device metadata
        :param bridge: the smartbridge object
        :param bridge_device: a dict with the details of the bridge
"""
self._device = device
self._smartbridge = bridge
self._bridge_device = bridge_device
async def async_added_to_hass(self):
"""Register callbacks."""
self._smartbridge.add_subscriber(self.device_id, self.async_write_ha_state)
@property
def device_id(self):
"""Return the device ID used for calling pylutron_caseta."""
return self._device["device_id"]
@property
def name(self):
"""Return the name of the device."""
return self._device["name"]
@property
def serial(self):
"""Return the serial number of the device."""
return self._device["serial"]
@property
def unique_id(self):
"""Return the unique ID of the device (serial)."""
return str(self.serial)
@property
def device_info(self) -> DeviceInfo:
"""Return the device info."""
return DeviceInfo(
identifiers={(DOMAIN, self.serial)},
manufacturer=MANUFACTURER,
model=f"{self._device['model']} ({self._device['type']})",
name=self.name,
suggested_area=self._device["name"].split("_")[0],
via_device=(DOMAIN, self._bridge_device["serial"]),
configuration_url="https://device-login.lutron.com",
)
@property
def extra_state_attributes(self):
"""Return the state attributes."""
return {"device_id": self.device_id, "zone_id": self._device["zone"]}
@property
def should_poll(self):
"""No polling needed."""
return False
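# --- Hedged sketch (not part of the component above) -------------------------
# Each platform listed in PLATFORMS pulls the shared objects stored in
# hass.data[DOMAIN][entry_id] during its own async_setup_entry. The helper
# below sketches that lookup; real platforms wrap devices in domain-specific
# subclasses of LutronCasetaDevice rather than the base class used here.
async def _example_platform_setup(hass, config_entry, async_add_entities):
    data = hass.data[DOMAIN][config_entry.entry_id]
    bridge = data[BRIDGE_LEAP]
    bridge_device = data[BRIDGE_DEVICE]
    entities = [
        LutronCasetaDevice(device, bridge, bridge_device)
        for device_id, device in bridge.get_devices().items()
        if device_id != BRIDGE_DEVICE_ID
    ]
    async_add_entities(entities)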
| mezz64/home-assistant | homeassistant/components/lutron_caseta/__init__.py | Python | apache-2.0 | 10,684 |
# Copyright lowRISC contributors.
# Licensed under the Apache License, Version 2.0, see LICENSE for details.
# SPDX-License-Identifier: Apache-2.0
"""Generate SystemVerilog designs from IpBlock object"""
import logging as log
import os
from typing import Dict, Optional, Tuple
from mako import exceptions # type: ignore
from mako.template import Template # type: ignore
from pkg_resources import resource_filename
from .ip_block import IpBlock
from .lib import check_int
from .multi_register import MultiRegister
from .reg_base import RegBase
from .register import Register
def escape_name(name: str) -> str:
return name.lower().replace(' ', '_')
def make_box_quote(msg: str, indent: str = ' ') -> str:
hr = indent + ('/' * (len(msg) + 6))
middle = indent + '// ' + msg + ' //'
return '\n'.join([hr, middle, hr])
def _get_awparam_name(iface_name: Optional[str]) -> str:
return (iface_name or 'Iface').capitalize() + 'Aw'
def get_addr_widths(block: IpBlock) -> Dict[Optional[str], Tuple[str, int]]:
'''Return the address widths for the device interfaces
Returns a dictionary keyed by interface name whose values are pairs:
(paramname, width) where paramname is IfaceAw for an unnamed interface and
FooAw for an interface called foo. This is constructed in the same order as
block.reg_blocks.
If there is a single device interface and that interface is unnamed, use
the more general parameter name "BlockAw".
'''
assert block.reg_blocks
if len(block.reg_blocks) == 1 and None in block.reg_blocks:
return {None: ('BlockAw', block.reg_blocks[None].get_addr_width())}
return {name: (_get_awparam_name(name), rb.get_addr_width())
for name, rb in block.reg_blocks.items()}
def get_type_name_pfx(block: IpBlock, iface_name: Optional[str]) -> str:
return block.name.lower() + ('' if iface_name is None
else '_{}'.format(iface_name.lower()))
def get_r0(reg: RegBase) -> Register:
'''Get a Register representing an entry in the RegBase'''
if isinstance(reg, Register):
return reg
else:
assert isinstance(reg, MultiRegister)
return reg.reg
def get_iface_tx_type(block: IpBlock,
iface_name: Optional[str],
hw2reg: bool) -> str:
x2x = 'hw2reg' if hw2reg else 'reg2hw'
pfx = get_type_name_pfx(block, iface_name)
return '_'.join([pfx, x2x, 't'])
def get_reg_tx_type(block: IpBlock, reg: RegBase, hw2reg: bool) -> str:
'''Get the name of the hw2reg or reg2hw type for reg'''
if isinstance(reg, Register):
r0 = reg
type_suff = 'reg_t'
else:
assert isinstance(reg, MultiRegister)
r0 = reg.reg
type_suff = 'mreg_t'
x2x = 'hw2reg' if hw2reg else 'reg2hw'
return '_'.join([block.name.lower(),
x2x,
r0.name.lower(),
type_suff])
def gen_rtl(block: IpBlock, outdir: str) -> int:
# Read Register templates
reg_top_tpl = Template(
filename=resource_filename('reggen', 'reg_top.sv.tpl'))
reg_pkg_tpl = Template(
filename=resource_filename('reggen', 'reg_pkg.sv.tpl'))
# Generate <block>_reg_pkg.sv
#
# This defines the various types used to interface between the *_reg_top
# module(s) and the block itself.
reg_pkg_path = os.path.join(outdir, block.name.lower() + "_reg_pkg.sv")
with open(reg_pkg_path, 'w', encoding='UTF-8') as fout:
try:
fout.write(reg_pkg_tpl.render(block=block))
except: # noqa F722 for template Exception handling
log.error(exceptions.text_error_template().render())
return 1
# Generate the register block implementation(s). For a device interface
# with no name we generate the register module "<block>_reg_top" (writing
# to <block>_reg_top.sv). In any other case, we also need the interface
# name, giving <block>_<ifname>_reg_top.
lblock = block.name.lower()
for if_name, rb in block.reg_blocks.items():
if if_name is None:
mod_base = lblock
else:
mod_base = lblock + '_' + if_name.lower()
mod_name = mod_base + '_reg_top'
reg_top_path = os.path.join(outdir, mod_name + '.sv')
with open(reg_top_path, 'w', encoding='UTF-8') as fout:
try:
fout.write(reg_top_tpl.render(block=block,
mod_base=mod_base,
mod_name=mod_name,
if_name=if_name,
rb=rb))
except: # noqa F722 for template Exception handling
log.error(exceptions.text_error_template().render())
return 1
return 0
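# Illustrative usage sketch (assumed, not prescriptive): a caller would typically
# construct and validate an IpBlock from its register description and then call
# gen_rtl(block, outdir), which writes <block>_reg_pkg.sv plus one
# <block>[_<iface>]_reg_top.sv per register block, returning 0 on success and 1 if
# template rendering failed.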
def render_param(dst_type: str, value: str) -> str:
'''Render a parameter value as used for the destination type
The value is itself a string but we have already checked that if dst_type
happens to be "int" or "int unsigned" then it can be parsed as an integer.
If dst_type is "int unsigned" and the value is larger than 2^31 then
explicitly generate a 32-bit hex value. This allows 32-bit literals whose
top bits are set (which can't be written as bare integers in SystemVerilog
without warnings, because those are interpreted as ints).
'''
if dst_type == 'int unsigned':
# This shouldn't fail because we've already checked it in
# _parse_parameter in params.py
int_val = check_int(value, "integer parameter")
if int_val >= (1 << 31):
return "32'h{:08x}".format(int_val)
return value
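# Illustrative note: render_param("int unsigned", "4294901760") returns
# "32'hffff0000" (0xFFFF0000 >= 2**31), while render_param("int unsigned", "7") and
# any non-integer destination type simply return the value string unchanged.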
| lowRISC/opentitan | util/reggen/gen_rtl.py | Python | apache-2.0 | 5,763 |
import json
from typing import Tuple
from great_expectations.core.id_dict import IDDict
class MetricConfiguration:
def __init__(
self,
metric_name: str,
metric_domain_kwargs: dict,
metric_value_kwargs: dict = None,
metric_dependencies: dict = None,
):
self._metric_name = metric_name
if not isinstance(metric_domain_kwargs, IDDict):
metric_domain_kwargs = IDDict(metric_domain_kwargs)
self._metric_domain_kwargs = metric_domain_kwargs
if not isinstance(metric_value_kwargs, IDDict):
if metric_value_kwargs is None:
metric_value_kwargs = {}
metric_value_kwargs = IDDict(metric_value_kwargs)
self._metric_value_kwargs = metric_value_kwargs
if metric_dependencies is None:
metric_dependencies = {}
self._metric_dependencies = metric_dependencies
def __repr__(self):
return json.dumps(self.to_json_dict(), indent=2)
def __str__(self):
return self.__repr__()
@property
def metric_name(self):
return self._metric_name
@property
def metric_domain_kwargs(self):
return self._metric_domain_kwargs
@property
def metric_value_kwargs(self):
return self._metric_value_kwargs
@property
def metric_domain_kwargs_id(self):
return self.metric_domain_kwargs.to_id()
@property
def metric_value_kwargs_id(self):
return self.metric_value_kwargs.to_id()
@property
def metric_dependencies(self):
return self._metric_dependencies
@metric_dependencies.setter
def metric_dependencies(self, metric_dependencies):
self._metric_dependencies = metric_dependencies
@property
def id(self) -> Tuple[str, str, str]:
return (
self.metric_name,
self.metric_domain_kwargs_id,
self.metric_value_kwargs_id,
)
def to_json_dict(self) -> dict:
json_dict: dict = {
"metric_name": self.metric_name,
"metric_domain_kwargs": self.metric_domain_kwargs,
"metric_domain_kwargs_id": self.metric_domain_kwargs_id,
"metric_value_kwargs": self.metric_value_kwargs,
"metric_value_kwargs_id": self.metric_value_kwargs_id,
"id": self.id,
}
return json_dict
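

# Illustrative usage sketch (runs only when this module is executed directly; the
# metric name and kwargs below are made-up examples, not a reference to any
# specific metric definition).
if __name__ == "__main__":
    _demo = MetricConfiguration(
        metric_name="column.max",
        metric_domain_kwargs={"column": "price"},
        metric_value_kwargs={"parse_strings_as_datetimes": False},
    )
    # id is the (metric name, domain-kwargs id, value-kwargs id) triple used as a cache key.
    print(_demo.id)
    print(_demo)  # __repr__ pretty-prints to_json_dict() as indented JSON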
| great-expectations/great_expectations | great_expectations/validator/metric_configuration.py | Python | apache-2.0 | 2,379 |
# GalaxyFDSClientException is raised in __init__ below, so it must be imported
# (module path assumed from the FDS SDK layout).
from fds.galaxy_fds_client_exception import GalaxyFDSClientException


class TimestampAntiStealingLinkConfig(dict):
def __init__(self, json):
if json is not None:
if 'enabled' in json.keys():
self.enabled = json['enabled']
else:
self.enabled = None
if 'primaryKey' in json.keys():
self.primary_key = json['primaryKey']
else:
self.primary_key = None
if 'secondaryKey' in json.keys():
self.secondary_key = json['secondaryKey']
else:
self.secondary_key = None
else:
raise GalaxyFDSClientException("Json data cannot be None")
@property
def enabled(self):
return self['enabled']
@enabled.setter
def enabled(self, enabled):
self['enabled'] = enabled
@property
def primary_key(self):
return self['primaryKey']
@primary_key.setter
def primary_key(self, primary_key):
self['primaryKey'] = primary_key
@property
def secondary_key(self):
return self['secondaryKey']
@secondary_key.setter
def secondary_key(self, secondary_key):
self['secondaryKey'] = secondary_key
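

# Illustrative usage sketch (runs only when this module is executed directly; the
# key values are made up). Keys missing from the input dict are initialised to None.
if __name__ == "__main__":
    _cfg = TimestampAntiStealingLinkConfig({"enabled": True, "primaryKey": "k1"})
    print(_cfg.enabled)        # True
    print(_cfg.primary_key)    # k1
    print(_cfg.secondary_key)  # None
    _cfg.secondary_key = "k2"
    print(_cfg)  # behaves like a plain dict, so it serialises straight back to JSON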
| XiaoMi/galaxy-fds-sdk-python | fds/model/timestamp_anti_stealing_link_config.py | Python | apache-2.0 | 1,037 |
#!/usr/bin/python
## Download files from Amazon S3 (e.g. raw photos for 3D models)
## Andy Bevan 15-Jun-2014, updated 21-Nov-2014
## Daniel Pett updated 05-Jan-2016
__author__ = 'ahb108'
## Currently for Python 2.7.5 (tested on MacOSX 10.9.2) launched in a virtual environment:
from PIL import Image # Pillow with libjpeg support
from PIL import ImageDraw
import urllib3
import json
import re
import numpy as np
import argparse
import os
import urllib2
import zipfile
# Argument parser
parser = argparse.ArgumentParser(description='This is a script to combine vector polygon masks into a binary raster mask for 3d modelling.')
parser.add_argument('-a','--app',help='MicroPasts application', required=True)
parser.add_argument('-w','--wd', help='Working directory',required=True)
args = parser.parse_args()
## Global settings ##
os.chdir(args.wd)
app = args.app
pybinst = 'http://crowdsourced.micropasts.org'
###################################
# Get the raw jpg files from working directory
ext = ['.JPG', '.jpg', '.jpeg', '.JPEG']
files = [ f for f in os.listdir('.') if f.endswith(tuple(ext)) ]
print("Masking each individual photograph...")
for q in range(0, len(files)):
# Open an example image
img = Image.open(files[q])
imnameonly = os.path.splitext(files[q])[0]
# Get JSON data for tasks and find task ID for this file
downloadURL = str(pybinst) + '/project/' + str(app) + '/tasks/export?type=task&format=json'
outputFilename = str(app) + '_task.json'
    # Download JSON file to working directory
response = urllib2.urlopen(downloadURL)
zippedData = response.read()
# Save data to disk
output = open(outputFilename,'wb')
output.write(zippedData)
output.close()
# Extract the data
zfobj = zipfile.ZipFile(outputFilename)
for name in zfobj.namelist():
uncompressed = zfobj.read(name)
# Save uncompressed data to disk
outputFilename = name
output = open(outputFilename,'wb')
output.write(uncompressed)
output.close()
with open(outputFilename) as data_file:
jtasks = json.load(data_file)
    # Loop through, looking for tasks that reference this image (almost always one,
    # unless tasks have been duplicated, but allow more than one just in case)
imtasks = []
for elm in range(0, len(jtasks)):
onetask = jtasks[elm]
onetaskurl = onetask['info']['url_b'].encode('utf-8')
if re.search(files[q], onetaskurl): imtasks.extend([onetask['id']])
# Get JSON data for task runs (even if they are duplicated)
jtaskruns = []
for a in range(0, len(imtasks)):
downloadURL = str(pybinst) + '/project/' + str(app) + '/' + str(imtasks[a]) + '/results.json'
outputFilename = str(app) + str(imtasks[a]) + '_task_run.json'
        # Download JSON files to working directory
response = urllib2.urlopen(downloadURL)
fileData = response.read()
# Save data to disk
output = open(outputFilename,'wb')
output.write(fileData)
output.close()
with open(outputFilename) as data_file:
jtaskruns.extend(json.load(data_file))
# Loop through and extract outlines
for a in range(0, len(jtaskruns)):
jtaskrun = jtaskruns[a] # one contributor
imtmp = Image.new("L", img.size, color=0)
draw = ImageDraw.Draw(imtmp)
# Loop through outline (or possible multiple outline polygons)
for outs in range(0, len(jtaskrun['info']['outline'])):
# Extract the outline and convert to tuples
o0 = jtaskrun['info']['outline'][outs][0]
p = [] # Empty list for outline vertices
h = img.size[1] # Get image height
for x in range(0, len(o0)):
xy = o0[x]
xy[1] = h - xy[1] # reverse y-coordinates
p.append(tuple(xy))
draw.polygon(tuple(p), fill=255)
# Loop through holes in same way
for hls in range(0, len(jtaskrun['info']['holes'])):
h0 = jtaskrun['info']['holes'][hls][0]
ph = []
for x in range(0, len(h0)):
xy = h0[x]
xy[1] = h - xy[1]
ph.append(tuple(xy))
draw.polygon(tuple(ph), fill=0)
# imtmp.show()
if jtaskrun['user_id'] is None:
fn = imnameonly + '_mask_' + str(a) + '_anon.JPG'
else:
fn = imnameonly + '_mask_' + str(a) + '_user' + str(jtaskrun['user_id']) + '.JPG'
imtmp.save(fn)
        if a == 1:
fn1 = imnameonly + '_mask.JPG'
imtmp.save(fn1)
print("Done.")
| MicroPasts/MicroPasts-Scripts | photoMasking/photoMasking.py | Python | apache-2.0 | 4,679 |
#!/usr/bin/env python
# coding: utf-8
#copyRight by heibanke
import csv
import re
csvfile = open('beijing_jt.csv','r')
reader = csv.reader(csvfile)
# reader.next() only works in Python 2; next(reader) works in both Python 2 and 3
next(reader)
jt_info = next(reader)
print(jt_info[1].decode('utf-8'))
csvfile.close()
# Convert the stations field into a list of station names
station_pattern = (r'(?P<number>[0-9]+)\s(?P<name>\D+)')
station_list = []
stations = re.findall(station_pattern,jt_info[-1].decode('utf-8'))
for tmp in stations:
print(tmp[0],tmp[1].strip())
station_list.append(tmp[1].strip())
result={}
result[jt_info[1]]=station_list
print(result) | heibanke/python_do_something | Code/Chapter2/homework2-4_csv_ex.py | Python | apache-2.0 | 647 |
# coding=utf-8
# Copyright 2022 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Defines dataloader functionalities."""
import re
from typing import Any, Callable, Optional, Tuple
from clu import deterministic_data
import jax
from lib.datasets import billiard
from lib.datasets import trafficsigns
from lib.preprocess import image_ops
from lib.preprocess import preprocess_spec
import numpy as np
import tensorflow as tf
import tensorflow_datasets as tfds
def get_dataset(
dataset: str,
global_batch_size: int,
rng: np.ndarray,
train_preprocessing_fn: Optional[Callable[[Any], Any]] = None,
eval_preprocessing_fn: Optional[Callable[[Any], Any]] = None,
num_epochs: Optional[int] = None,
filter_fn: Optional[Callable[[Any], Any]] = None,
**kwargs,
) -> Tuple[tf.data.Dataset, tf.data.Dataset, int]:
"""Creates training and eval datasets.
The train dataset will be shuffled, while the eval dataset won't be.
Args:
dataset: Name of the dataset.
global_batch_size: Global batch size to use.
rng: PRNG seed used for shuffling.
    train_preprocessing_fn: Function that will be applied to each individual sample.
eval_preprocessing_fn: Optional preprocessing function specifically for eval
samples (if None, use train_preprocessing_fn for eval samples as well).
num_epochs: Number of epochs to repeat dataset (default=None, optional).
    filter_fn: Function that filters samples according to some criteria.
**kwargs: Optional keyword arguments for specific datasets.
Returns:
A tuple consisting of a train dataset, an eval dataset as well as the
number of classes.
"""
del kwargs
rng, preprocess_rng = jax.random.split(rng)
if dataset.startswith("test_image_classification"):
# The shape of the dataset can be provided as suffix of the name of the
# dataset test_image_classification_batch_height_width.
match = re.search(r"test_image_classification_(\d+)_(\d+)_(\d+)_(\d+)",
dataset)
if match:
shape = tuple(int(match.group(i)) for i in [1, 2, 3, 4])
else:
shape = (13, 32, 32, 3)
images_tensor = tf.random.uniform(
shape, minval=0, maxval=256, dtype=tf.int32, seed=22432)
images_tensor = tf.cast(images_tensor, tf.uint8)
labels_tensor = tf.random.uniform((shape[0],),
minval=0,
maxval=10,
dtype=tf.int32,
seed=4202)
ds_image = tf.data.Dataset.from_tensor_slices(images_tensor)
ds_label = tf.data.Dataset.from_tensor_slices(labels_tensor)
ds = tf.data.Dataset.zip({"image": ds_image, "label": ds_label})
train_ds = ds
eval_ds = ds
num_classes = 10
elif dataset == "trafficsigns":
train_ds = trafficsigns.load("train")
eval_ds = trafficsigns.load("test")
num_classes = 4
elif dataset.startswith("billiard"):
# The format of the dataset string is "billiard-label_fn_str-{valid,test}"
# where label_fn_str options are specified in data/billiard.py
# Example: billiard-left-color-min-max-valid
parts = dataset.split("-")
label_fn_str = "-".join(parts[1:-1])
evaluation_split = parts[-1]
train_ds, num_classes = billiard.load_billiard("train", label_fn_str)
eval_ds, _ = billiard.load_billiard(evaluation_split, label_fn_str)
elif dataset.startswith("caltech_birds2011"):
mode = dataset[len("caltech_birds2011") + 1:]
train_ds, eval_ds, num_classes = _get_birds200_dataset(mode, rng)
elif dataset.startswith("test_image_classification"):
# The shape of the dataset can be provided as suffix of the name of the
# dataset test_image_classification_batch_height_width.
match = re.search(r"test_image_classification_(\d+)_(\d+)_(\d+)_(\d+)",
dataset)
if match:
shape = tuple(int(match.group(i)) for i in [1, 2, 3, 4])
else:
shape = (13, 32, 32, 3)
with tf.device("/CPU:0"):
images_tensor = tf.random.uniform(
shape, minval=0, maxval=256, dtype=tf.int32, seed=22432)
images_tensor = tf.cast(images_tensor, tf.uint8)
labels_tensor = tf.random.uniform((shape[0],),
minval=0,
maxval=10,
dtype=tf.int32,
seed=4202)
ds_image = tf.data.Dataset.from_tensor_slices(images_tensor)
ds_label = tf.data.Dataset.from_tensor_slices(labels_tensor)
ds = tf.data.Dataset.zip({"image": ds_image, "label": ds_label})
train_ds = ds
eval_ds = ds
num_classes = 10
else: # Should be a TFDS dataset.
train_ds, eval_ds, num_classes = _get_tfds_dataset(dataset, rng)
# Set up a preprocessing function.
if train_preprocessing_fn is None:
@tf.autograph.experimental.do_not_convert # Usually fails anyway.
def _image_preprocess_fn(features):
if "image" in features:
features["image"] = tf.cast(features["image"], tf.float32) / 255.0
if "id" in features: # Included in some TFDS datasets, breaks JAX.
del features["id"]
return features
train_preprocessing_fn = _image_preprocess_fn
if eval_preprocessing_fn is None:
eval_preprocessing_fn = train_preprocessing_fn
rng_train, rng_eval = jax.random.split(preprocess_rng)
train_ds = _prepare_dataset(
train_ds,
global_batch_size,
True,
rng_train,
train_preprocessing_fn,
num_epochs=num_epochs,
filter_fn=filter_fn)
eval_ds = _prepare_dataset(
eval_ds,
global_batch_size,
False,
rng_eval,
eval_preprocessing_fn,
num_epochs=1,
filter_fn=filter_fn)
return train_ds, eval_ds, num_classes
def _get_birds200_dataset(
mode: str,
rng: np.ndarray) -> Tuple[tf.data.Dataset, tf.data.Dataset, int]:
"""Load the caltech_birds2011 dataset."""
assert jax.host_count() == 1, (
"caltech_birds2011 dataset does not support multihost training. "
"Found {} hosts.".format(jax.host_count()))
dataset_builder = tfds.builder("caltech_birds2011")
num_classes = 200
# Make sure each host uses a different RNG for the training data.
rng, data_rng = jax.random.split(rng)
data_rng = jax.random.fold_in(data_rng, jax.host_id())
data_rng, shuffle_rng = jax.random.split(data_rng)
if mode == "train-val":
read_config = tfds.ReadConfig(shuffle_seed=shuffle_rng[0])
ds = dataset_builder.as_dataset(
split="train", shuffle_files=False, read_config=read_config)
train_ds = ds.take(5000).shuffle(5000, seed=shuffle_rng[0])
eval_ds = ds.skip(5000)
elif mode == "train-test":
train_split = "train"
eval_split = "test"
train_read_config = tfds.ReadConfig(shuffle_seed=shuffle_rng[0])
train_ds = dataset_builder.as_dataset(
split=train_split, shuffle_files=True, read_config=train_read_config)
eval_read_config = tfds.ReadConfig(shuffle_seed=shuffle_rng[1])
eval_ds = dataset_builder.as_dataset(
split=eval_split, shuffle_files=False, read_config=eval_read_config)
else:
raise ValueError(f"Unknown mode: {mode}.")
return train_ds, eval_ds, num_classes
def _get_tfds_dataset(
dataset: str,
rng: np.ndarray) -> Tuple[tf.data.Dataset, tf.data.Dataset, int]:
"""Loads a TFDS dataset."""
dataset_builder = tfds.builder(dataset)
num_classes = 0
if "label" in dataset_builder.info.features:
num_classes = dataset_builder.info.features["label"].num_classes
# Make sure each host uses a different RNG for the training data.
rng, data_rng = jax.random.split(rng)
data_rng = jax.random.fold_in(data_rng, jax.host_id())
data_rng, shuffle_rng = jax.random.split(data_rng)
train_split = deterministic_data.get_read_instruction_for_host(
"train", dataset_builder.info.splits["train"].num_examples)
train_read_config = tfds.ReadConfig(shuffle_seed=shuffle_rng[0])
train_ds = dataset_builder.as_dataset(
split=train_split, shuffle_files=True, read_config=train_read_config)
eval_split_name = {
"cifar10": "test",
"imagenet2012": "validation"
}.get(dataset, "test")
eval_split_size = dataset_builder.info.splits[eval_split_name].num_examples
eval_split = deterministic_data.get_read_instruction_for_host(
eval_split_name, eval_split_size)
eval_read_config = tfds.ReadConfig(shuffle_seed=shuffle_rng[1])
eval_ds = dataset_builder.as_dataset(
split=eval_split, shuffle_files=False, read_config=eval_read_config)
return train_ds, eval_ds, num_classes
def _prepare_dataset(
dataset: tf.data.Dataset,
global_batch_size: int,
shuffle: bool,
rng: np.ndarray,
preprocess_fn: Optional[Callable[[Any], Any]] = None,
num_epochs: Optional[int] = None,
filter_fn: Optional[Callable[[Any], Any]] = None) -> tf.data.Dataset:
"""Batches, shuffles, prefetches and preprocesses a dataset.
Args:
dataset: The dataset to prepare.
global_batch_size: The global batch size to use.
    shuffle: Whether to shuffle the data at the example level.
rng: PRNG for seeding the shuffle operations.
preprocess_fn: Preprocessing function that will be applied to every example.
num_epochs: Number of epochs to repeat the dataset.
    filter_fn: Function that filters samples according to some criteria.
Returns:
The dataset.
"""
if shuffle and rng is None:
raise ValueError("Shuffling without RNG is not supported.")
if global_batch_size % jax.host_count() != 0:
raise ValueError(f"Batch size {global_batch_size} not divisible by number "
f"of hosts ({jax.host_count()}).")
local_batch_size = global_batch_size // jax.host_count()
batch_dims = [jax.local_device_count(), local_batch_size]
# tf.data uses single integers as seed.
if rng is not None:
rng = rng[0]
ds = dataset.repeat(num_epochs)
if shuffle:
ds = ds.shuffle(1024, seed=rng)
if preprocess_fn is not None:
ds = ds.map(preprocess_fn, num_parallel_calls=tf.data.experimental.AUTOTUNE)
if filter_fn is not None:
ds = ds.filter(filter_fn)
for batch_size in reversed(batch_dims):
ds = ds.batch(batch_size, drop_remainder=True)
return ds.prefetch(tf.data.experimental.AUTOTUNE)
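# Illustrative note: with a single host, 8 local devices and global_batch_size=16,
# local_batch_size is 16 and batch_dims is [8, 16], so as written each element of
# the returned dataset carries leading shape (8, 16, ...) -- one leading axis per
# local device, the layout expected by jax.pmap-style training loops.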
def parse_preprocessing_strings(training_string, eval_string):
"""Parses conjurer preprocessing strings."""
print(training_string)
print(image_ops.all_ops(), flush=True)
train_preprocessing_fn = preprocess_spec.parse(training_string,
image_ops.all_ops())
eval_preprocessing_fn = preprocess_spec.parse(eval_string,
image_ops.all_ops())
return train_preprocessing_fn, eval_preprocessing_fn
| google-research/google-research | ptopk_patch_selection/lib/data.py | Python | apache-2.0 | 11,369 |
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import copy
import sys
import os
import operator
import shlex
import warnings
import heapq
import bisect
import random
from subprocess import Popen, PIPE
from threading import Thread
from collections import defaultdict
from itertools import chain
from functools import reduce
from math import sqrt, log, isinf, isnan, pow, ceil
from typing import (
Any,
Callable,
Dict,
Generic,
Hashable,
Iterable,
Iterator,
IO,
List,
NoReturn,
Optional,
Sequence,
Tuple,
Union,
TypeVar,
cast,
overload,
TYPE_CHECKING,
)
from pyspark.java_gateway import local_connect_and_auth
from pyspark.serializers import (
AutoBatchedSerializer,
BatchedSerializer,
NoOpSerializer,
CartesianDeserializer,
CloudPickleSerializer,
PairDeserializer,
CPickleSerializer,
Serializer,
pack_long,
read_int,
write_int,
)
from pyspark.join import (
python_join,
python_left_outer_join,
python_right_outer_join,
python_full_outer_join,
python_cogroup,
)
from pyspark.statcounter import StatCounter
from pyspark.rddsampler import RDDSampler, RDDRangeSampler, RDDStratifiedSampler
from pyspark.storagelevel import StorageLevel
from pyspark.resource.requests import ExecutorResourceRequests, TaskResourceRequests
from pyspark.resource.profile import ResourceProfile
from pyspark.resultiterable import ResultIterable
from pyspark.shuffle import (
Aggregator,
ExternalMerger,
get_used_memory,
ExternalSorter,
ExternalGroupBy,
)
from pyspark.traceback_utils import SCCallSiteSync
from pyspark.util import fail_on_stopiteration, _parse_memory
if TYPE_CHECKING:
import socket
import io
from pyspark._typing import NonUDFType
from pyspark._typing import S, NumberOrArray
from pyspark.context import SparkContext
from pyspark.sql.pandas._typing import (
PandasScalarUDFType,
PandasGroupedMapUDFType,
PandasGroupedAggUDFType,
PandasWindowAggUDFType,
PandasScalarIterUDFType,
PandasMapIterUDFType,
PandasCogroupedMapUDFType,
ArrowMapIterUDFType,
)
from pyspark.sql.dataframe import DataFrame
from pyspark.sql.types import AtomicType, StructType
from pyspark.sql._typing import AtomicValue, RowLike, SQLBatchedUDFType
from py4j.java_gateway import JavaObject # type: ignore[import]
from py4j.java_collections import JavaArray # type: ignore[import]
T = TypeVar("T")
T_co = TypeVar("T_co", covariant=True)
U = TypeVar("U")
K = TypeVar("K", bound=Hashable)
V = TypeVar("V")
V1 = TypeVar("V1")
V2 = TypeVar("V2")
V3 = TypeVar("V3")
__all__ = ["RDD"]
class PythonEvalType:
"""
Evaluation type of python rdd.
These values are internal to PySpark.
These values should match values in org.apache.spark.api.python.PythonEvalType.
"""
NON_UDF: "NonUDFType" = 0
SQL_BATCHED_UDF: "SQLBatchedUDFType" = 100
SQL_SCALAR_PANDAS_UDF: "PandasScalarUDFType" = 200
SQL_GROUPED_MAP_PANDAS_UDF: "PandasGroupedMapUDFType" = 201
SQL_GROUPED_AGG_PANDAS_UDF: "PandasGroupedAggUDFType" = 202
SQL_WINDOW_AGG_PANDAS_UDF: "PandasWindowAggUDFType" = 203
SQL_SCALAR_PANDAS_ITER_UDF: "PandasScalarIterUDFType" = 204
SQL_MAP_PANDAS_ITER_UDF: "PandasMapIterUDFType" = 205
SQL_COGROUPED_MAP_PANDAS_UDF: "PandasCogroupedMapUDFType" = 206
SQL_MAP_ARROW_ITER_UDF: "ArrowMapIterUDFType" = 207
def portable_hash(x: Hashable) -> int:
"""
This function returns consistent hash code for builtin types, especially
for None and tuple with None.
The algorithm is similar to that one used by CPython 2.7
Examples
--------
>>> portable_hash(None)
0
>>> portable_hash((None, 1)) & 0xffffffff
219750521
"""
if "PYTHONHASHSEED" not in os.environ:
raise RuntimeError("Randomness of hash of string should be disabled via PYTHONHASHSEED")
if x is None:
return 0
if isinstance(x, tuple):
h = 0x345678
for i in x:
h ^= portable_hash(i)
h *= 1000003
h &= sys.maxsize
h ^= len(x)
if h == -1:
h = -2
return int(h)
return hash(x)
class BoundedFloat(float):
"""
Bounded value is generated by approximate job, with confidence and low
bound and high bound.
Examples
--------
>>> BoundedFloat(100.0, 0.95, 95.0, 105.0)
100.0
"""
confidence: float
low: float
high: float
def __new__(cls, mean: float, confidence: float, low: float, high: float) -> "BoundedFloat":
obj = float.__new__(cls, mean)
obj.confidence = confidence
obj.low = low
obj.high = high
return obj
def _create_local_socket(sock_info: "JavaArray") -> "io.BufferedRWPair":
"""
Create a local socket that can be used to load deserialized data from the JVM
Parameters
----------
sock_info : tuple
Tuple containing port number and authentication secret for a local socket.
Returns
-------
sockfile file descriptor of the local socket
"""
sockfile: "io.BufferedRWPair"
sock: "socket.socket"
port: int = sock_info[0]
auth_secret: str = sock_info[1]
sockfile, sock = local_connect_and_auth(port, auth_secret)
# The RDD materialization time is unpredictable, if we set a timeout for socket reading
# operation, it will very possibly fail. See SPARK-18281.
sock.settimeout(None)
return sockfile
def _load_from_socket(sock_info: "JavaArray", serializer: Serializer) -> Iterator[Any]:
"""
Connect to a local socket described by sock_info and use the given serializer to yield data
Parameters
----------
sock_info : tuple
Tuple containing port number and authentication secret for a local socket.
serializer : :py:class:`Serializer`
The PySpark serializer to use
Returns
-------
result of :py:meth:`Serializer.load_stream`,
usually a generator that yields deserialized data
"""
sockfile = _create_local_socket(sock_info)
# The socket will be automatically closed when garbage-collected.
return serializer.load_stream(sockfile)
def _local_iterator_from_socket(sock_info: "JavaArray", serializer: Serializer) -> Iterator[Any]:
class PyLocalIterable:
"""Create a synchronous local iterable over a socket"""
def __init__(self, _sock_info: "JavaArray", _serializer: Serializer):
port: int
auth_secret: str
jsocket_auth_server: "JavaObject"
port, auth_secret, self.jsocket_auth_server = _sock_info
self._sockfile = _create_local_socket((port, auth_secret))
self._serializer = _serializer
self._read_iter: Iterator[Any] = iter([]) # Initialize as empty iterator
self._read_status = 1
def __iter__(self) -> Iterator[Any]:
while self._read_status == 1:
# Request next partition data from Java
write_int(1, self._sockfile)
self._sockfile.flush()
# If response is 1 then there is a partition to read, if 0 then fully consumed
self._read_status = read_int(self._sockfile)
if self._read_status == 1:
# Load the partition data as a stream and read each item
self._read_iter = self._serializer.load_stream(self._sockfile)
for item in self._read_iter:
yield item
# An error occurred, join serving thread and raise any exceptions from the JVM
elif self._read_status == -1:
self.jsocket_auth_server.getResult()
def __del__(self) -> None:
# If local iterator is not fully consumed,
if self._read_status == 1:
try:
# Finish consuming partition data stream
for _ in self._read_iter:
pass
# Tell Java to stop sending data and close connection
write_int(0, self._sockfile)
self._sockfile.flush()
except Exception:
# Ignore any errors, socket is automatically closed when garbage-collected
pass
return iter(PyLocalIterable(sock_info, serializer))
class Partitioner:
def __init__(self, numPartitions: int, partitionFunc: Callable[[Any], int]):
self.numPartitions = numPartitions
self.partitionFunc = partitionFunc
def __eq__(self, other: Any) -> bool:
return (
isinstance(other, Partitioner)
and self.numPartitions == other.numPartitions
and self.partitionFunc == other.partitionFunc
)
def __call__(self, k: Any) -> int:
return self.partitionFunc(k) % self.numPartitions
class RDD(Generic[T_co]):
"""
A Resilient Distributed Dataset (RDD), the basic abstraction in Spark.
Represents an immutable, partitioned collection of elements that can be
operated on in parallel.
"""
def __init__(
self,
jrdd: "JavaObject",
ctx: "SparkContext",
jrdd_deserializer: Serializer = AutoBatchedSerializer(CPickleSerializer()),
):
self._jrdd = jrdd
self.is_cached = False
self.is_checkpointed = False
self.has_resource_profile = False
self.ctx = ctx
self._jrdd_deserializer = jrdd_deserializer
self._id = jrdd.id()
self.partitioner: Optional[Partitioner] = None
def _pickled(self: "RDD[T]") -> "RDD[T]":
return self._reserialize(AutoBatchedSerializer(CPickleSerializer()))
def id(self) -> int:
"""
A unique ID for this RDD (within its SparkContext).
"""
return self._id
def __repr__(self) -> str:
return self._jrdd.toString()
def __getnewargs__(self) -> NoReturn:
# This method is called when attempting to pickle an RDD, which is always an error:
raise RuntimeError(
"It appears that you are attempting to broadcast an RDD or reference an RDD from an "
"action or transformation. RDD transformations and actions can only be invoked by the "
"driver, not inside of other transformations; for example, "
"rdd1.map(lambda x: rdd2.values.count() * x) is invalid because the values "
"transformation and count action cannot be performed inside of the rdd1.map "
"transformation. For more information, see SPARK-5063."
)
@property
def context(self) -> "SparkContext":
"""
The :class:`SparkContext` that this RDD was created on.
"""
return self.ctx
def cache(self: "RDD[T]") -> "RDD[T]":
"""
Persist this RDD with the default storage level (`MEMORY_ONLY`).
"""
self.is_cached = True
self.persist(StorageLevel.MEMORY_ONLY)
return self
def persist(self: "RDD[T]", storageLevel: StorageLevel = StorageLevel.MEMORY_ONLY) -> "RDD[T]":
"""
Set this RDD's storage level to persist its values across operations
after the first time it is computed. This can only be used to assign
a new storage level if the RDD does not have a storage level set yet.
If no storage level is specified defaults to (`MEMORY_ONLY`).
Examples
--------
>>> rdd = sc.parallelize(["b", "a", "c"])
>>> rdd.persist().is_cached
True
"""
self.is_cached = True
javaStorageLevel = self.ctx._getJavaStorageLevel(storageLevel)
self._jrdd.persist(javaStorageLevel)
return self
def unpersist(self: "RDD[T]", blocking: bool = False) -> "RDD[T]":
"""
Mark the RDD as non-persistent, and remove all blocks for it from
memory and disk.
.. versionchanged:: 3.0.0
Added optional argument `blocking` to specify whether to block until all
blocks are deleted.
"""
self.is_cached = False
self._jrdd.unpersist(blocking)
return self
def checkpoint(self) -> None:
"""
Mark this RDD for checkpointing. It will be saved to a file inside the
checkpoint directory set with :meth:`SparkContext.setCheckpointDir` and
all references to its parent RDDs will be removed. This function must
be called before any job has been executed on this RDD. It is strongly
recommended that this RDD is persisted in memory, otherwise saving it
on a file will require recomputation.
"""
self.is_checkpointed = True
self._jrdd.rdd().checkpoint()
def isCheckpointed(self) -> bool:
"""
Return whether this RDD is checkpointed and materialized, either reliably or locally.
"""
return self._jrdd.rdd().isCheckpointed()
def localCheckpoint(self) -> None:
"""
Mark this RDD for local checkpointing using Spark's existing caching layer.
This method is for users who wish to truncate RDD lineages while skipping the expensive
step of replicating the materialized data in a reliable distributed file system. This is
useful for RDDs with long lineages that need to be truncated periodically (e.g. GraphX).
Local checkpointing sacrifices fault-tolerance for performance. In particular, checkpointed
data is written to ephemeral local storage in the executors instead of to a reliable,
fault-tolerant storage. The effect is that if an executor fails during the computation,
the checkpointed data may no longer be accessible, causing an irrecoverable job failure.
This is NOT safe to use with dynamic allocation, which removes executors along
with their cached blocks. If you must use both features, you are advised to set
`spark.dynamicAllocation.cachedExecutorIdleTimeout` to a high value.
The checkpoint directory set through :meth:`SparkContext.setCheckpointDir` is not used.
"""
self._jrdd.rdd().localCheckpoint()
def isLocallyCheckpointed(self) -> bool:
"""
Return whether this RDD is marked for local checkpointing.
Exposed for testing.
"""
return self._jrdd.rdd().isLocallyCheckpointed()
def getCheckpointFile(self) -> Optional[str]:
"""
Gets the name of the file to which this RDD was checkpointed
Not defined if RDD is checkpointed locally.
"""
checkpointFile = self._jrdd.rdd().getCheckpointFile()
return checkpointFile.get() if checkpointFile.isDefined() else None
def map(self: "RDD[T]", f: Callable[[T], U], preservesPartitioning: bool = False) -> "RDD[U]":
"""
Return a new RDD by applying a function to each element of this RDD.
Examples
--------
>>> rdd = sc.parallelize(["b", "a", "c"])
>>> sorted(rdd.map(lambda x: (x, 1)).collect())
[('a', 1), ('b', 1), ('c', 1)]
"""
def func(_: int, iterator: Iterable[T]) -> Iterable[U]:
return map(fail_on_stopiteration(f), iterator)
return self.mapPartitionsWithIndex(func, preservesPartitioning)
def flatMap(
self: "RDD[T]", f: Callable[[T], Iterable[U]], preservesPartitioning: bool = False
) -> "RDD[U]":
"""
Return a new RDD by first applying a function to all elements of this
RDD, and then flattening the results.
Examples
--------
>>> rdd = sc.parallelize([2, 3, 4])
>>> sorted(rdd.flatMap(lambda x: range(1, x)).collect())
[1, 1, 1, 2, 2, 3]
>>> sorted(rdd.flatMap(lambda x: [(x, x), (x, x)]).collect())
[(2, 2), (2, 2), (3, 3), (3, 3), (4, 4), (4, 4)]
"""
def func(_: int, iterator: Iterable[T]) -> Iterable[U]:
return chain.from_iterable(map(fail_on_stopiteration(f), iterator))
return self.mapPartitionsWithIndex(func, preservesPartitioning)
def mapPartitions(
self: "RDD[T]", f: Callable[[Iterable[T]], Iterable[U]], preservesPartitioning: bool = False
) -> "RDD[U]":
"""
Return a new RDD by applying a function to each partition of this RDD.
Examples
--------
>>> rdd = sc.parallelize([1, 2, 3, 4], 2)
>>> def f(iterator): yield sum(iterator)
>>> rdd.mapPartitions(f).collect()
[3, 7]
"""
def func(_: int, iterator: Iterable[T]) -> Iterable[U]:
return f(iterator)
return self.mapPartitionsWithIndex(func, preservesPartitioning)
def mapPartitionsWithIndex(
self: "RDD[T]",
f: Callable[[int, Iterable[T]], Iterable[U]],
preservesPartitioning: bool = False,
) -> "RDD[U]":
"""
Return a new RDD by applying a function to each partition of this RDD,
while tracking the index of the original partition.
Examples
--------
>>> rdd = sc.parallelize([1, 2, 3, 4], 4)
>>> def f(splitIndex, iterator): yield splitIndex
>>> rdd.mapPartitionsWithIndex(f).sum()
6
"""
return PipelinedRDD(self, f, preservesPartitioning)
def mapPartitionsWithSplit(
self: "RDD[T]",
f: Callable[[int, Iterable[T]], Iterable[U]],
preservesPartitioning: bool = False,
) -> "RDD[U]":
"""
Return a new RDD by applying a function to each partition of this RDD,
while tracking the index of the original partition.
.. deprecated:: 0.9.0
use :py:meth:`RDD.mapPartitionsWithIndex` instead.
Examples
--------
>>> rdd = sc.parallelize([1, 2, 3, 4], 4)
>>> def f(splitIndex, iterator): yield splitIndex
>>> rdd.mapPartitionsWithSplit(f).sum()
6
"""
warnings.warn(
"mapPartitionsWithSplit is deprecated; use mapPartitionsWithIndex instead",
FutureWarning,
stacklevel=2,
)
return self.mapPartitionsWithIndex(f, preservesPartitioning)
def getNumPartitions(self) -> int:
"""
Returns the number of partitions in RDD
Examples
--------
>>> rdd = sc.parallelize([1, 2, 3, 4], 2)
>>> rdd.getNumPartitions()
2
"""
return self._jrdd.partitions().size()
def filter(self: "RDD[T]", f: Callable[[T], bool]) -> "RDD[T]":
"""
Return a new RDD containing only the elements that satisfy a predicate.
Examples
--------
>>> rdd = sc.parallelize([1, 2, 3, 4, 5])
>>> rdd.filter(lambda x: x % 2 == 0).collect()
[2, 4]
"""
def func(iterator: Iterable[T]) -> Iterable[T]:
return filter(fail_on_stopiteration(f), iterator)
return self.mapPartitions(func, True)
def distinct(self: "RDD[T]", numPartitions: Optional[int] = None) -> "RDD[T]":
"""
Return a new RDD containing the distinct elements in this RDD.
Examples
--------
>>> sorted(sc.parallelize([1, 1, 2, 3]).distinct().collect())
[1, 2, 3]
"""
return (
self.map(lambda x: (x, None))
.reduceByKey(lambda x, _: x, numPartitions)
.map(lambda x: x[0])
)
def sample(
self: "RDD[T]", withReplacement: bool, fraction: float, seed: Optional[int] = None
) -> "RDD[T]":
"""
Return a sampled subset of this RDD.
Parameters
----------
withReplacement : bool
can elements be sampled multiple times (replaced when sampled out)
fraction : float
expected size of the sample as a fraction of this RDD's size
            without replacement: probability that each element is chosen; fraction must be in [0, 1]
with replacement: expected number of times each element is chosen; fraction must be >= 0
seed : int, optional
seed for the random number generator
Notes
-----
This is not guaranteed to provide exactly the fraction specified of the total
count of the given :class:`DataFrame`.
Examples
--------
>>> rdd = sc.parallelize(range(100), 4)
>>> 6 <= rdd.sample(False, 0.1, 81).count() <= 14
True
"""
assert fraction >= 0.0, "Negative fraction value: %s" % fraction
return self.mapPartitionsWithIndex(RDDSampler(withReplacement, fraction, seed).func, True)
def randomSplit(
self: "RDD[T]", weights: Sequence[Union[int, float]], seed: Optional[int] = None
) -> "List[RDD[T]]":
"""
Randomly splits this RDD with the provided weights.
weights : list
weights for splits, will be normalized if they don't sum to 1
seed : int, optional
random seed
Returns
-------
list
split RDDs in a list
Examples
--------
>>> rdd = sc.parallelize(range(500), 1)
>>> rdd1, rdd2 = rdd.randomSplit([2, 3], 17)
>>> len(rdd1.collect() + rdd2.collect())
500
>>> 150 < rdd1.count() < 250
True
>>> 250 < rdd2.count() < 350
True
"""
s = float(sum(weights))
cweights = [0.0]
for w in weights:
cweights.append(cweights[-1] + w / s)
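        # Illustrative note: e.g. weights=[2, 3] normalise to cumulative boundaries
        # cweights == [0.0, 0.4, 1.0], so the two RDDRangeSamplers built below split
        # the unit interval at 0.4, giving an expected 40%/60% split of the elements.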
if seed is None:
seed = random.randint(0, 2 ** 32 - 1)
return [
self.mapPartitionsWithIndex(RDDRangeSampler(lb, ub, seed).func, True)
for lb, ub in zip(cweights, cweights[1:])
]
# this is ported from scala/spark/RDD.scala
def takeSample(
self: "RDD[T]", withReplacement: bool, num: int, seed: Optional[int] = None
) -> List[T]:
"""
Return a fixed-size sampled subset of this RDD.
Notes
-----
This method should only be used if the resulting array is expected
to be small, as all the data is loaded into the driver's memory.
Examples
--------
>>> rdd = sc.parallelize(range(0, 10))
>>> len(rdd.takeSample(True, 20, 1))
20
>>> len(rdd.takeSample(False, 5, 2))
5
>>> len(rdd.takeSample(False, 15, 3))
10
"""
numStDev = 10.0
if num < 0:
raise ValueError("Sample size cannot be negative.")
elif num == 0:
return []
initialCount = self.count()
if initialCount == 0:
return []
rand = random.Random(seed)
if (not withReplacement) and num >= initialCount:
# shuffle current RDD and return
samples = self.collect()
rand.shuffle(samples)
return samples
maxSampleSize = sys.maxsize - int(numStDev * sqrt(sys.maxsize))
if num > maxSampleSize:
raise ValueError("Sample size cannot be greater than %d." % maxSampleSize)
fraction = RDD._computeFractionForSampleSize(num, initialCount, withReplacement)
samples = self.sample(withReplacement, fraction, seed).collect()
# If the first sample didn't turn out large enough, keep trying to take samples;
# this shouldn't happen often because we use a big multiplier for their initial size.
# See: scala/spark/RDD.scala
while len(samples) < num:
# TODO: add log warning for when more than one iteration was run
seed = rand.randint(0, sys.maxsize)
samples = self.sample(withReplacement, fraction, seed).collect()
rand.shuffle(samples)
return samples[0:num]
@staticmethod
def _computeFractionForSampleSize(
sampleSizeLowerBound: int, total: int, withReplacement: bool
) -> float:
"""
Returns a sampling rate that guarantees a sample of
size >= sampleSizeLowerBound 99.99% of the time.
How the sampling rate is determined:
Let p = num / total, where num is the sample size and total is the
total number of data points in the RDD. We're trying to compute
q > p such that
- when sampling with replacement, we're drawing each data point
with prob_i ~ Pois(q), where we want to guarantee
Pr[s < num] < 0.0001 for s = sum(prob_i for i from 0 to
total), i.e. the failure rate of not having a sufficiently large
sample < 0.0001. Setting q = p + 5 * sqrt(p/total) is sufficient
to guarantee 0.9999 success rate for num > 12, but we need a
slightly larger q (9 empirically determined).
- when sampling without replacement, we're drawing each data point
with prob_i ~ Binomial(total, fraction) and our choice of q
guarantees 1-delta, or 0.9999 success rate, where success rate is
defined the same as in sampling with replacement.
"""
fraction = float(sampleSizeLowerBound) / total
if withReplacement:
numStDev = 5
if sampleSizeLowerBound < 12:
numStDev = 9
return fraction + numStDev * sqrt(fraction / total)
else:
delta = 0.00005
gamma = -log(delta) / total
return min(1, fraction + gamma + sqrt(gamma * gamma + 2 * gamma * fraction))
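        # Illustrative note (approximate figures): for num=100 and total=10000
        # without replacement, fraction=0.01, gamma is roughly 9.9e-4, and the
        # returned rate is about 0.0156 -- roughly 1.6x the naive fraction, so a
        # shortfall below num becomes extremely unlikely.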
def union(self: "RDD[T]", other: "RDD[U]") -> "RDD[Union[T, U]]":
"""
Return the union of this RDD and another one.
Examples
--------
>>> rdd = sc.parallelize([1, 1, 2, 3])
>>> rdd.union(rdd).collect()
[1, 1, 2, 3, 1, 1, 2, 3]
"""
if self._jrdd_deserializer == other._jrdd_deserializer:
rdd: "RDD[Union[T, U]]" = RDD(
self._jrdd.union(other._jrdd), self.ctx, self._jrdd_deserializer
)
else:
# These RDDs contain data in different serialized formats, so we
# must normalize them to the default serializer.
self_copy = self._reserialize()
other_copy = other._reserialize()
rdd = RDD(self_copy._jrdd.union(other_copy._jrdd), self.ctx, self.ctx.serializer)
if (
self.partitioner == other.partitioner
and self.getNumPartitions() == rdd.getNumPartitions()
):
rdd.partitioner = self.partitioner
return rdd
def intersection(self: "RDD[T]", other: "RDD[T]") -> "RDD[T]":
"""
Return the intersection of this RDD and another one. The output will
not contain any duplicate elements, even if the input RDDs did.
Notes
-----
This method performs a shuffle internally.
Examples
--------
>>> rdd1 = sc.parallelize([1, 10, 2, 3, 4, 5])
>>> rdd2 = sc.parallelize([1, 6, 2, 3, 7, 8])
>>> rdd1.intersection(rdd2).collect()
[1, 2, 3]
"""
return (
self.map(lambda v: (v, None))
.cogroup(other.map(lambda v: (v, None)))
.filter(lambda k_vs: all(k_vs[1]))
.keys()
)
def _reserialize(self: "RDD[T]", serializer: Optional[Serializer] = None) -> "RDD[T]":
serializer = serializer or self.ctx.serializer
if self._jrdd_deserializer != serializer:
self = self.map(lambda x: x, preservesPartitioning=True)
self._jrdd_deserializer = serializer
return self
def __add__(self: "RDD[T]", other: "RDD[U]") -> "RDD[Union[T, U]]":
"""
Return the union of this RDD and another one.
Examples
--------
>>> rdd = sc.parallelize([1, 1, 2, 3])
>>> (rdd + rdd).collect()
[1, 1, 2, 3, 1, 1, 2, 3]
"""
if not isinstance(other, RDD):
raise TypeError
return self.union(other)
@overload
def repartitionAndSortWithinPartitions(
self: "RDD[Tuple[S, V]]",
numPartitions: Optional[int] = ...,
partitionFunc: Callable[["S"], int] = ...,
ascending: bool = ...,
) -> "RDD[Tuple[S, V]]":
...
@overload
def repartitionAndSortWithinPartitions(
self: "RDD[Tuple[K, V]]",
numPartitions: Optional[int],
partitionFunc: Callable[[K], int],
ascending: bool,
keyfunc: Callable[[K], "S"],
) -> "RDD[Tuple[K, V]]":
...
@overload
def repartitionAndSortWithinPartitions(
self: "RDD[Tuple[K, V]]",
numPartitions: Optional[int] = ...,
partitionFunc: Callable[[K], int] = ...,
ascending: bool = ...,
*,
keyfunc: Callable[[K], "S"],
) -> "RDD[Tuple[K, V]]":
...
def repartitionAndSortWithinPartitions(
self: "RDD[Tuple[Any, Any]]",
numPartitions: Optional[int] = None,
partitionFunc: Callable[[Any], int] = portable_hash,
ascending: bool = True,
keyfunc: Callable[[Any], Any] = lambda x: x,
) -> "RDD[Tuple[Any, Any]]":
"""
Repartition the RDD according to the given partitioner and, within each resulting partition,
sort records by their keys.
Examples
--------
>>> rdd = sc.parallelize([(0, 5), (3, 8), (2, 6), (0, 8), (3, 8), (1, 3)])
>>> rdd2 = rdd.repartitionAndSortWithinPartitions(2, lambda x: x % 2, True)
>>> rdd2.glom().collect()
[[(0, 5), (0, 8), (2, 6)], [(1, 3), (3, 8), (3, 8)]]
"""
if numPartitions is None:
numPartitions = self._defaultReducePartitions()
memory = self._memory_limit()
serializer = self._jrdd_deserializer
def sortPartition(iterator: Iterable[Tuple[K, V]]) -> Iterable[Tuple[K, V]]:
sort = ExternalSorter(memory * 0.9, serializer).sorted
return iter(sort(iterator, key=lambda k_v: keyfunc(k_v[0]), reverse=(not ascending)))
return self.partitionBy(numPartitions, partitionFunc).mapPartitions(sortPartition, True)
@overload
def sortByKey(
self: "RDD[Tuple[S, V]]",
ascending: bool = ...,
numPartitions: Optional[int] = ...,
) -> "RDD[Tuple[K, V]]":
...
@overload
def sortByKey(
self: "RDD[Tuple[K, V]]",
ascending: bool,
numPartitions: int,
keyfunc: Callable[[K], "S"],
) -> "RDD[Tuple[K, V]]":
...
@overload
def sortByKey(
self: "RDD[Tuple[K, V]]",
ascending: bool = ...,
numPartitions: Optional[int] = ...,
*,
keyfunc: Callable[[K], "S"],
) -> "RDD[Tuple[K, V]]":
...
def sortByKey(
self: "RDD[Tuple[K, V]]",
ascending: Optional[bool] = True,
numPartitions: Optional[int] = None,
keyfunc: Callable[[Any], Any] = lambda x: x,
) -> "RDD[Tuple[K, V]]":
"""
Sorts this RDD, which is assumed to consist of (key, value) pairs.
Examples
--------
>>> tmp = [('a', 1), ('b', 2), ('1', 3), ('d', 4), ('2', 5)]
>>> sc.parallelize(tmp).sortByKey().first()
('1', 3)
>>> sc.parallelize(tmp).sortByKey(True, 1).collect()
[('1', 3), ('2', 5), ('a', 1), ('b', 2), ('d', 4)]
>>> sc.parallelize(tmp).sortByKey(True, 2).collect()
[('1', 3), ('2', 5), ('a', 1), ('b', 2), ('d', 4)]
>>> tmp2 = [('Mary', 1), ('had', 2), ('a', 3), ('little', 4), ('lamb', 5)]
>>> tmp2.extend([('whose', 6), ('fleece', 7), ('was', 8), ('white', 9)])
>>> sc.parallelize(tmp2).sortByKey(True, 3, keyfunc=lambda k: k.lower()).collect()
[('a', 3), ('fleece', 7), ('had', 2), ('lamb', 5),...('white', 9), ('whose', 6)]
"""
if numPartitions is None:
numPartitions = self._defaultReducePartitions()
memory = self._memory_limit()
serializer = self._jrdd_deserializer
def sortPartition(iterator: Iterable[Tuple[K, V]]) -> Iterable[Tuple[K, V]]:
sort = ExternalSorter(memory * 0.9, serializer).sorted
return iter(sort(iterator, key=lambda kv: keyfunc(kv[0]), reverse=(not ascending)))
if numPartitions == 1:
if self.getNumPartitions() > 1:
self = self.coalesce(1)
return self.mapPartitions(sortPartition, True)
# first compute the boundary of each part via sampling: we want to partition
# the key-space into bins such that the bins have roughly the same
# number of (key, value) pairs falling into them
rddSize = self.count()
if not rddSize:
return self # empty RDD
maxSampleSize = numPartitions * 20.0 # constant from Spark's RangePartitioner
fraction = min(maxSampleSize / max(rddSize, 1), 1.0)
samples = self.sample(False, fraction, 1).map(lambda kv: kv[0]).collect()
samples = sorted(samples, key=keyfunc)
# we have numPartitions many parts but one of the them has
# an implicit boundary
bounds = [
samples[int(len(samples) * (i + 1) / numPartitions)]
for i in range(0, numPartitions - 1)
]
def rangePartitioner(k: K) -> int:
p = bisect.bisect_left(bounds, keyfunc(k))
if ascending:
return p
else:
return numPartitions - 1 - p # type: ignore[operator]
return self.partitionBy(numPartitions, rangePartitioner).mapPartitions(sortPartition, True)
def sortBy(
self: "RDD[T]",
keyfunc: Callable[[T], "S"],
ascending: bool = True,
numPartitions: Optional[int] = None,
) -> "RDD[T]":
"""
Sorts this RDD by the given keyfunc
Examples
--------
>>> tmp = [('a', 1), ('b', 2), ('1', 3), ('d', 4), ('2', 5)]
>>> sc.parallelize(tmp).sortBy(lambda x: x[0]).collect()
[('1', 3), ('2', 5), ('a', 1), ('b', 2), ('d', 4)]
>>> sc.parallelize(tmp).sortBy(lambda x: x[1]).collect()
[('a', 1), ('b', 2), ('1', 3), ('d', 4), ('2', 5)]
"""
return (
self.keyBy(keyfunc) # type: ignore[type-var]
.sortByKey(ascending, numPartitions)
.values()
)
def glom(self: "RDD[T]") -> "RDD[List[T]]":
"""
Return an RDD created by coalescing all elements within each partition
into a list.
Examples
--------
>>> rdd = sc.parallelize([1, 2, 3, 4], 2)
>>> sorted(rdd.glom().collect())
[[1, 2], [3, 4]]
"""
def func(iterator: Iterable[T]) -> Iterable[List[T]]:
yield list(iterator)
return self.mapPartitions(func)
def cartesian(self: "RDD[T]", other: "RDD[U]") -> "RDD[Tuple[T, U]]":
"""
Return the Cartesian product of this RDD and another one, that is, the
RDD of all pairs of elements ``(a, b)`` where ``a`` is in `self` and
``b`` is in `other`.
Examples
--------
>>> rdd = sc.parallelize([1, 2])
>>> sorted(rdd.cartesian(rdd).collect())
[(1, 1), (1, 2), (2, 1), (2, 2)]
"""
# Due to batching, we can't use the Java cartesian method.
deserializer = CartesianDeserializer(self._jrdd_deserializer, other._jrdd_deserializer)
return RDD(self._jrdd.cartesian(other._jrdd), self.ctx, deserializer)
def groupBy(
self: "RDD[T]",
f: Callable[[T], K],
numPartitions: Optional[int] = None,
partitionFunc: Callable[[K], int] = portable_hash,
) -> "RDD[Tuple[K, Iterable[T]]]":
"""
Return an RDD of grouped items.
Examples
--------
>>> rdd = sc.parallelize([1, 1, 2, 3, 5, 8])
>>> result = rdd.groupBy(lambda x: x % 2).collect()
>>> sorted([(x, sorted(y)) for (x, y) in result])
[(0, [2, 8]), (1, [1, 1, 3, 5])]
"""
return self.map(lambda x: (f(x), x)).groupByKey(numPartitions, partitionFunc)
def pipe(
self, command: str, env: Optional[Dict[str, str]] = None, checkCode: bool = False
) -> "RDD[str]":
"""
Return an RDD created by piping elements to a forked external process.
Parameters
----------
command : str
command to run.
env : dict, optional
environment variables to set.
checkCode : bool, optional
whether or not to check the return value of the shell command.
Examples
--------
>>> sc.parallelize(['1', '2', '', '3']).pipe('cat').collect()
['1', '2', '', '3']
"""
if env is None:
env = dict()
def func(iterator: Iterable[T]) -> Iterable[str]:
pipe = Popen(shlex.split(command), env=env, stdin=PIPE, stdout=PIPE)
def pipe_objs(out: IO[bytes]) -> None:
for obj in iterator:
s = str(obj).rstrip("\n") + "\n"
out.write(s.encode("utf-8"))
out.close()
Thread(target=pipe_objs, args=[pipe.stdin]).start()
def check_return_code() -> Iterable[int]:
pipe.wait()
if checkCode and pipe.returncode:
raise RuntimeError(
"Pipe function `%s' exited "
"with error code %d" % (command, pipe.returncode)
)
else:
for i in range(0):
yield i
return (
cast(bytes, x).rstrip(b"\n").decode("utf-8")
for x in chain(
iter(cast(IO[bytes], pipe.stdout).readline, b""), check_return_code()
)
)
return self.mapPartitions(func)
def foreach(self: "RDD[T]", f: Callable[[T], None]) -> None:
"""
Applies a function to all elements of this RDD.
Examples
--------
>>> def f(x): print(x)
>>> sc.parallelize([1, 2, 3, 4, 5]).foreach(f)
"""
f = fail_on_stopiteration(f)
def processPartition(iterator: Iterable[T]) -> Iterable[Any]:
for x in iterator:
f(x)
return iter([])
self.mapPartitions(processPartition).count() # Force evaluation
def foreachPartition(self: "RDD[T]", f: Callable[[Iterable[T]], None]) -> None:
"""
Applies a function to each partition of this RDD.
Examples
--------
>>> def f(iterator):
... for x in iterator:
... print(x)
>>> sc.parallelize([1, 2, 3, 4, 5]).foreachPartition(f)
"""
def func(it: Iterable[T]) -> Iterable[Any]:
r = f(it)
try:
return iter(r) # type: ignore[call-overload]
except TypeError:
return iter([])
self.mapPartitions(func).count() # Force evaluation
def collect(self: "RDD[T]") -> List[T]:
"""
Return a list that contains all of the elements in this RDD.
Notes
-----
This method should only be used if the resulting array is expected
to be small, as all the data is loaded into the driver's memory.
"""
with SCCallSiteSync(self.context):
assert self.ctx._jvm is not None
sock_info = self.ctx._jvm.PythonRDD.collectAndServe(self._jrdd.rdd())
return list(_load_from_socket(sock_info, self._jrdd_deserializer))
def collectWithJobGroup(
self: "RDD[T]", groupId: str, description: str, interruptOnCancel: bool = False
) -> "List[T]":
"""
        When collecting the RDD, use this method to specify a job group.
.. versionadded:: 3.0.0
.. deprecated:: 3.1.0
Use :class:`pyspark.InheritableThread` with the pinned thread mode enabled.
"""
warnings.warn(
"Deprecated in 3.1, Use pyspark.InheritableThread with "
"the pinned thread mode enabled.",
FutureWarning,
)
with SCCallSiteSync(self.context):
assert self.ctx._jvm is not None
sock_info = self.ctx._jvm.PythonRDD.collectAndServeWithJobGroup(
self._jrdd.rdd(), groupId, description, interruptOnCancel
)
return list(_load_from_socket(sock_info, self._jrdd_deserializer))
def reduce(self: "RDD[T]", f: Callable[[T, T], T]) -> T:
"""
Reduces the elements of this RDD using the specified commutative and
associative binary operator. Currently reduces partitions locally.
Examples
--------
>>> from operator import add
>>> sc.parallelize([1, 2, 3, 4, 5]).reduce(add)
15
>>> sc.parallelize((2 for _ in range(10))).map(lambda x: 1).cache().reduce(add)
10
>>> sc.parallelize([]).reduce(add)
Traceback (most recent call last):
...
ValueError: Can not reduce() empty RDD
"""
f = fail_on_stopiteration(f)
def func(iterator: Iterable[T]) -> Iterable[T]:
iterator = iter(iterator)
try:
initial = next(iterator)
except StopIteration:
return
yield reduce(f, iterator, initial)
vals = self.mapPartitions(func).collect()
if vals:
return reduce(f, vals)
raise ValueError("Can not reduce() empty RDD")
def treeReduce(self: "RDD[T]", f: Callable[[T, T], T], depth: int = 2) -> T:
"""
Reduces the elements of this RDD in a multi-level tree pattern.
Parameters
----------
f : function
depth : int, optional
suggested depth of the tree (default: 2)
Examples
--------
>>> add = lambda x, y: x + y
>>> rdd = sc.parallelize([-5, -4, -3, -2, -1, 1, 2, 3, 4], 10)
>>> rdd.treeReduce(add)
-5
>>> rdd.treeReduce(add, 1)
-5
>>> rdd.treeReduce(add, 2)
-5
>>> rdd.treeReduce(add, 5)
-5
>>> rdd.treeReduce(add, 10)
-5
"""
if depth < 1:
raise ValueError("Depth cannot be smaller than 1 but got %d." % depth)
# Use the second entry to indicate whether this is a dummy value.
zeroValue: Tuple[T, bool] = ( # type: ignore[assignment]
None,
True,
)
def op(x: Tuple[T, bool], y: Tuple[T, bool]) -> Tuple[T, bool]:
if x[1]:
return y
elif y[1]:
return x
else:
return f(x[0], y[0]), False # type: ignore[arg-type]
reduced = self.map(lambda x: (x, False)).treeAggregate(zeroValue, op, op, depth)
if reduced[1]:
raise ValueError("Cannot reduce empty RDD.")
return reduced[0]
def fold(self: "RDD[T]", zeroValue: T, op: Callable[[T, T], T]) -> T:
"""
Aggregate the elements of each partition, and then the results for all
the partitions, using a given associative function and a neutral "zero value."
The function ``op(t1, t2)`` is allowed to modify ``t1`` and return it
as its result value to avoid object allocation; however, it should not
modify ``t2``.
This behaves somewhat differently from fold operations implemented
for non-distributed collections in functional languages like Scala.
This fold operation may be applied to partitions individually, and then
fold those results into the final result, rather than apply the fold
to each element sequentially in some defined ordering. For functions
that are not commutative, the result may differ from that of a fold
applied to a non-distributed collection.
Examples
--------
>>> from operator import add
>>> sc.parallelize([1, 2, 3, 4, 5]).fold(0, add)
15
"""
op = fail_on_stopiteration(op)
def func(iterator: Iterable[T]) -> Iterable[T]:
acc = zeroValue
for obj in iterator:
acc = op(acc, obj)
yield acc
# collecting result of mapPartitions here ensures that the copy of
# zeroValue provided to each partition is unique from the one provided
# to the final reduce call
vals = self.mapPartitions(func).collect()
return reduce(op, vals, zeroValue)
def aggregate(
self: "RDD[T]", zeroValue: U, seqOp: Callable[[U, T], U], combOp: Callable[[U, U], U]
) -> U:
"""
Aggregate the elements of each partition, and then the results for all
the partitions, using the given combine functions and a neutral "zero
value."
The functions ``seqOp(t1, t2)`` and ``combOp(t1, t2)`` are allowed to modify
``t1`` and return it as their result value to avoid object allocation;
however, they should not modify ``t2``.
The first function (seqOp) can return a different result type, U, than
the type of this RDD. Thus, we need one operation for merging a T into
a U and one operation for merging two Us.
Examples
--------
>>> seqOp = (lambda x, y: (x[0] + y, x[1] + 1))
>>> combOp = (lambda x, y: (x[0] + y[0], x[1] + y[1]))
>>> sc.parallelize([1, 2, 3, 4]).aggregate((0, 0), seqOp, combOp)
(10, 4)
>>> sc.parallelize([]).aggregate((0, 0), seqOp, combOp)
(0, 0)
"""
seqOp = fail_on_stopiteration(seqOp)
combOp = fail_on_stopiteration(combOp)
def func(iterator: Iterable[T]) -> Iterable[U]:
acc = zeroValue
for obj in iterator:
acc = seqOp(acc, obj)
yield acc
# collecting result of mapPartitions here ensures that the copy of
# zeroValue provided to each partition is unique from the one provided
# to the final reduce call
vals = self.mapPartitions(func).collect()
return reduce(combOp, vals, zeroValue)
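# Added commentary (illustrative, assumes an active SparkContext `sc`): a common
# aggregate() pattern carries a (sum, count) accumulator to compute a mean:
#
#     seqOp = lambda acc, x: (acc[0] + x, acc[1] + 1)     # fold a value into U
#     combOp = lambda a, b: (a[0] + b[0], a[1] + b[1])    # merge two U's
#     total, n = sc.parallelize([1, 2, 3, 4]).aggregate((0, 0), seqOp, combOp)
#     total / n   # -> 2.5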
def treeAggregate(
self: "RDD[T]",
zeroValue: U,
seqOp: Callable[[U, T], U],
combOp: Callable[[U, U], U],
depth: int = 2,
) -> U:
"""
Aggregates the elements of this RDD in a multi-level tree
pattern.
Parameters
----------
depth : int, optional
suggested depth of the tree (default: 2)
Examples
--------
>>> add = lambda x, y: x + y
>>> rdd = sc.parallelize([-5, -4, -3, -2, -1, 1, 2, 3, 4], 10)
>>> rdd.treeAggregate(0, add, add)
-5
>>> rdd.treeAggregate(0, add, add, 1)
-5
>>> rdd.treeAggregate(0, add, add, 2)
-5
>>> rdd.treeAggregate(0, add, add, 5)
-5
>>> rdd.treeAggregate(0, add, add, 10)
-5
"""
if depth < 1:
raise ValueError("Depth cannot be smaller than 1 but got %d." % depth)
if self.getNumPartitions() == 0:
return zeroValue
def aggregatePartition(iterator: Iterable[T]) -> Iterable[U]:
acc = zeroValue
for obj in iterator:
acc = seqOp(acc, obj)
yield acc
partiallyAggregated = self.mapPartitions(aggregatePartition)
numPartitions = partiallyAggregated.getNumPartitions()
scale = max(int(ceil(pow(numPartitions, 1.0 / depth))), 2)
# If creating an extra level doesn't help reduce the wall-clock time, we stop the tree
# aggregation.
while numPartitions > scale + numPartitions / scale:
numPartitions /= scale # type: ignore[assignment]
curNumPartitions = int(numPartitions)
def mapPartition(i: int, iterator: Iterable[U]) -> Iterable[Tuple[int, U]]:
for obj in iterator:
yield (i % curNumPartitions, obj)
partiallyAggregated = (
partiallyAggregated.mapPartitionsWithIndex(mapPartition)
.reduceByKey(combOp, curNumPartitions)
.values()
)
return partiallyAggregated.reduce(combOp)
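# Added commentary (illustrative): the fan-in factor of the tree is
# scale = max(ceil(numPartitions ** (1.0 / depth)), 2); partial results are
# re-keyed with `i % curNumPartitions` and merged by reduceByKey until adding
# another level would no longer pay off (the `numPartitions > scale +
# numPartitions / scale` check). For example, 100 partitions with depth=2 give
# scale == 10, so the 100 partial aggregates are first combined down to 10
# before the final driver-side reduce.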
@overload
def max(self: "RDD[S]") -> "S":
...
@overload
def max(self: "RDD[T]", key: Callable[[T], "S"]) -> T:
...
def max(self: "RDD[T]", key: Optional[Callable[[T], "S"]] = None) -> T:
"""
Find the maximum item in this RDD.
Parameters
----------
key : function, optional
A function used to generate key for comparing
Examples
--------
>>> rdd = sc.parallelize([1.0, 5.0, 43.0, 10.0])
>>> rdd.max()
43.0
>>> rdd.max(key=str)
5.0
"""
if key is None:
return self.reduce(max) # type: ignore[arg-type]
return self.reduce(lambda a, b: max(a, b, key=key)) # type: ignore[arg-type]
@overload
def min(self: "RDD[S]") -> "S":
...
@overload
def min(self: "RDD[T]", key: Callable[[T], "S"]) -> T:
...
def min(self: "RDD[T]", key: Optional[Callable[[T], "S"]] = None) -> T:
"""
Find the minimum item in this RDD.
Parameters
----------
key : function, optional
A function used to generate key for comparing
Examples
--------
>>> rdd = sc.parallelize([2.0, 5.0, 43.0, 10.0])
>>> rdd.min()
2.0
>>> rdd.min(key=str)
10.0
"""
if key is None:
return self.reduce(min) # type: ignore[arg-type]
return self.reduce(lambda a, b: min(a, b, key=key)) # type: ignore[arg-type]
def sum(self: "RDD[NumberOrArray]") -> "NumberOrArray":
"""
Add up the elements in this RDD.
Examples
--------
>>> sc.parallelize([1.0, 2.0, 3.0]).sum()
6.0
"""
return self.mapPartitions(lambda x: [sum(x)]).fold( # type: ignore[return-value]
0, operator.add
)
def count(self) -> int:
"""
Return the number of elements in this RDD.
Examples
--------
>>> sc.parallelize([2, 3, 4]).count()
3
"""
return self.mapPartitions(lambda i: [sum(1 for _ in i)]).sum()
def stats(self: "RDD[NumberOrArray]") -> StatCounter:
"""
Return a :class:`StatCounter` object that captures the mean, variance
and count of the RDD's elements in one operation.
"""
def redFunc(left_counter: StatCounter, right_counter: StatCounter) -> StatCounter:
return left_counter.mergeStats(right_counter)
return self.mapPartitions(lambda i: [StatCounter(i)]).reduce( # type: ignore[arg-type]
redFunc
)
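# Added commentary (hedged usage sketch, assumes an active SparkContext `sc`):
#
#     s = sc.parallelize([1.0, 2.0, 3.0]).stats()
#     s.count(), s.mean(), s.stdev()   # -> (3, 2.0, 0.816...)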
def histogram(
self: "RDD[S]", buckets: Union[int, List["S"], Tuple["S", ...]]
) -> Tuple[Sequence["S"], List[int]]:
"""
Compute a histogram using the provided buckets. The buckets
are all open to the right except for the last which is closed.
e.g. [1,10,20,50] means the buckets are [1,10) [10,20) [20,50],
which means 1<=x<10, 10<=x<20, 20<=x<=50. Given the inputs 1
and 50, the resulting histogram would be 1,0,1.
If your histogram is evenly spaced (e.g. [0, 10, 20, 30]),
this can be switched from an O(log n) insertion to O(1) per
element (where n is the number of buckets).
Buckets must be sorted, not contain any duplicates, and have
at least two elements.
If `buckets` is a number, it will generate buckets which are
evenly spaced between the minimum and maximum of the RDD. For
example, if the min value is 0 and the max is 100, given `buckets`
as 2, the resulting buckets will be [0,50) [50,100]. `buckets` must
be at least 1. An exception is raised if the RDD contains infinity.
If the elements in the RDD do not vary (max == min), a single bucket
will be used.
The return value is a tuple of buckets and histogram.
Examples
--------
>>> rdd = sc.parallelize(range(51))
>>> rdd.histogram(2)
([0, 25, 50], [25, 26])
>>> rdd.histogram([0, 5, 25, 50])
([0, 5, 25, 50], [5, 20, 26])
>>> rdd.histogram([0, 15, 30, 45, 60]) # evenly spaced buckets
([0, 15, 30, 45, 60], [15, 15, 15, 6])
>>> rdd = sc.parallelize(["ab", "ac", "b", "bd", "ef"])
>>> rdd.histogram(("a", "b", "c"))
(('a', 'b', 'c'), [2, 2])
"""
if isinstance(buckets, int):
if buckets < 1:
raise ValueError("number of buckets must be >= 1")
# filter out non-comparable elements
def comparable(x: Any) -> bool:
if x is None:
return False
if type(x) is float and isnan(x):
return False
return True
filtered = self.filter(comparable)
# faster than stats()
def minmax(a: Tuple["S", "S"], b: Tuple["S", "S"]) -> Tuple["S", "S"]:
return min(a[0], b[0]), max(a[1], b[1])
try:
minv, maxv = filtered.map(lambda x: (x, x)).reduce(minmax)
except TypeError as e:
if " empty " in str(e):
raise ValueError("can not generate buckets from empty RDD")
raise
if minv == maxv or buckets == 1:
return [minv, maxv], [filtered.count()]
try:
inc = (maxv - minv) / buckets # type: ignore[operator]
except TypeError:
raise TypeError("Can not generate buckets with non-number in RDD")
if isinf(inc):
raise ValueError("Can not generate buckets with infinite value")
# keep them as integer if possible
inc = int(inc)
if inc * buckets != maxv - minv: # type: ignore[operator]
inc = (maxv - minv) * 1.0 / buckets # type: ignore[operator]
buckets = [i * inc + minv for i in range(buckets)]
buckets.append(maxv) # fix accumulated error
even = True
elif isinstance(buckets, (list, tuple)):
if len(buckets) < 2:
raise ValueError("buckets should have more than one value")
if any(i is None or isinstance(i, float) and isnan(i) for i in buckets):
raise ValueError("can not have None or NaN in buckets")
if sorted(buckets) != list(buckets):
raise ValueError("buckets should be sorted")
if len(set(buckets)) != len(buckets):
raise ValueError("buckets should not contain duplicated values")
minv = buckets[0]
maxv = buckets[-1]
even = False
inc = None
try:
steps = [
buckets[i + 1] - buckets[i] # type: ignore[operator]
for i in range(len(buckets) - 1)
]
except TypeError:
pass # objects in buckets do not support '-'
else:
if max(steps) - min(steps) < 1e-10: # handle precision errors
even = True
inc = (maxv - minv) / (len(buckets) - 1) # type: ignore[operator]
else:
raise TypeError("buckets should be a list or tuple or number(int or long)")
def histogram(iterator: Iterable["S"]) -> Iterable[List[int]]:
counters = [0] * len(buckets) # type: ignore[arg-type]
for i in iterator:
if (
i is None
or (isinstance(i, float) and isnan(i)) # type: ignore[arg-type]
or i > maxv
or i < minv
):
continue
t = (
int((i - minv) / inc) # type: ignore[operator]
if even
else bisect.bisect_right(buckets, i) - 1 # type: ignore[arg-type]
)
counters[t] += 1
# add last two together
last = counters.pop()
counters[-1] += last
return [counters]
def mergeCounters(a: List[int], b: List[int]) -> List[int]:
return [i + j for i, j in zip(a, b)]
return buckets, self.mapPartitions(histogram).reduce(mergeCounters)
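# Added commentary (illustrative): with evenly spaced buckets the bucket index
# is computed in O(1) as int((x - minv) / inc); otherwise
# bisect.bisect_right(buckets, x) - 1 gives an O(log n) lookup. The last two
# counters are merged afterwards so the final bucket is closed on the right,
# e.g. for buckets [0, 25, 50] both 49 and 50 land in the [25, 50] bin.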
def mean(self: "RDD[NumberOrArray]") -> "NumberOrArray":
"""
Compute the mean of this RDD's elements.
Examples
--------
>>> sc.parallelize([1, 2, 3]).mean()
2.0
"""
return self.stats().mean() # type: ignore[return-value]
def variance(self: "RDD[NumberOrArray]") -> "NumberOrArray":
"""
Compute the variance of this RDD's elements.
Examples
--------
>>> sc.parallelize([1, 2, 3]).variance()
0.666...
"""
return self.stats().variance() # type: ignore[return-value]
def stdev(self: "RDD[NumberOrArray]") -> "NumberOrArray":
"""
Compute the standard deviation of this RDD's elements.
Examples
--------
>>> sc.parallelize([1, 2, 3]).stdev()
0.816...
"""
return self.stats().stdev() # type: ignore[return-value]
def sampleStdev(self: "RDD[NumberOrArray]") -> "NumberOrArray":
"""
Compute the sample standard deviation of this RDD's elements (which
corrects for bias in estimating the standard deviation by dividing by
N-1 instead of N).
Examples
--------
>>> sc.parallelize([1, 2, 3]).sampleStdev()
1.0
"""
return self.stats().sampleStdev() # type: ignore[return-value]
def sampleVariance(self: "RDD[NumberOrArray]") -> "NumberOrArray":
"""
Compute the sample variance of this RDD's elements (which corrects
for bias in estimating the variance by dividing by N-1 instead of N).
Examples
--------
>>> sc.parallelize([1, 2, 3]).sampleVariance()
1.0
"""
return self.stats().sampleVariance() # type: ignore[return-value]
def countByValue(self: "RDD[K]") -> Dict[K, int]:
"""
Return the count of each unique value in this RDD as a dictionary of
(value, count) pairs.
Examples
--------
>>> sorted(sc.parallelize([1, 2, 1, 2, 2], 2).countByValue().items())
[(1, 2), (2, 3)]
"""
def countPartition(iterator: Iterable[K]) -> Iterable[Dict[K, int]]:
counts: Dict[K, int] = defaultdict(int)
for obj in iterator:
counts[obj] += 1
yield counts
def mergeMaps(m1: Dict[K, int], m2: Dict[K, int]) -> Dict[K, int]:
for k, v in m2.items():
m1[k] += v
return m1
return self.mapPartitions(countPartition).reduce(mergeMaps)
@overload
def top(self: "RDD[S]", num: int) -> List["S"]:
...
@overload
def top(self: "RDD[T]", num: int, key: Callable[[T], "S"]) -> List[T]:
...
def top(self: "RDD[T]", num: int, key: Optional[Callable[[T], "S"]] = None) -> List[T]:
"""
Get the top N elements from an RDD.
Notes
-----
This method should only be used if the resulting array is expected
to be small, as all the data is loaded into the driver's memory.
It returns the list sorted in descending order.
Examples
--------
>>> sc.parallelize([10, 4, 2, 12, 3]).top(1)
[12]
>>> sc.parallelize([2, 3, 4, 5, 6], 2).top(2)
[6, 5]
>>> sc.parallelize([10, 4, 2, 12, 3]).top(3, key=str)
[4, 3, 2]
"""
def topIterator(iterator: Iterable[T]) -> Iterable[List[T]]:
yield heapq.nlargest(num, iterator, key=key)
def merge(a: List[T], b: List[T]) -> List[T]:
return heapq.nlargest(num, a + b, key=key)
return self.mapPartitions(topIterator).reduce(merge)
@overload
def takeOrdered(self: "RDD[S]", num: int) -> List["S"]:
...
@overload
def takeOrdered(self: "RDD[T]", num: int, key: Callable[[T], "S"]) -> List[T]:
...
def takeOrdered(self: "RDD[T]", num: int, key: Optional[Callable[[T], "S"]] = None) -> List[T]:
"""
Get the N elements from an RDD ordered in ascending order or as
specified by the optional key function.
Notes
-----
This method should only be used if the resulting array is expected
to be small, as all the data is loaded into the driver's memory.
Examples
--------
>>> sc.parallelize([10, 1, 2, 9, 3, 4, 5, 6, 7]).takeOrdered(6)
[1, 2, 3, 4, 5, 6]
>>> sc.parallelize([10, 1, 2, 9, 3, 4, 5, 6, 7], 2).takeOrdered(6, key=lambda x: -x)
[10, 9, 7, 6, 5, 4]
"""
def merge(a: List[T], b: List[T]) -> List[T]:
return heapq.nsmallest(num, a + b, key)
return self.mapPartitions(lambda it: [heapq.nsmallest(num, it, key)]).reduce(merge)
def take(self: "RDD[T]", num: int) -> List[T]:
"""
Take the first num elements of the RDD.
It works by first scanning one partition and using the results from
that partition to estimate the number of additional partitions needed
to satisfy the limit.
Translated from the Scala implementation in RDD#take().
Notes
-----
This method should only be used if the resulting array is expected
to be small, as all the data is loaded into the driver's memory.
Examples
--------
>>> sc.parallelize([2, 3, 4, 5, 6]).cache().take(2)
[2, 3]
>>> sc.parallelize([2, 3, 4, 5, 6]).take(10)
[2, 3, 4, 5, 6]
>>> sc.parallelize(range(100), 100).filter(lambda x: x > 90).take(3)
[91, 92, 93]
"""
items: List[T] = []
totalParts = self.getNumPartitions()
partsScanned = 0
while len(items) < num and partsScanned < totalParts:
# The number of partitions to try in this iteration.
# It is ok for this number to be greater than totalParts because
# we actually cap it at totalParts in runJob.
numPartsToTry = 1
if partsScanned > 0:
# If we didn't find any rows after the previous iteration,
# quadruple and retry. Otherwise, interpolate the number of
# partitions we need to try, but overestimate it by 50%.
# We also cap the estimation in the end.
if len(items) == 0:
numPartsToTry = partsScanned * 4
else:
# the first parameter of max is >=1 whenever partsScanned >= 2
numPartsToTry = int(1.5 * num * partsScanned / len(items)) - partsScanned
numPartsToTry = min(max(numPartsToTry, 1), partsScanned * 4)
left = num - len(items)
def takeUpToNumLeft(iterator: Iterable[T]) -> Iterable[T]:
iterator = iter(iterator)
taken = 0
while taken < left:
try:
yield next(iterator)
except StopIteration:
return
taken += 1
p = range(partsScanned, min(partsScanned + numPartsToTry, totalParts))
res = self.context.runJob(self, takeUpToNumLeft, p)
items += res
partsScanned += numPartsToTry
return items[:num]
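# Added commentary (illustrative): take() escalates how many partitions it
# scans per round. The first round scans 1 partition; an unproductive round
# quadruples the count, while a productive one interpolates from the hit rate
# with a 50% safety margin. E.g. if 4 scanned partitions produced 2 of the 10
# wanted items, the next round scans
#     min(max(int(1.5 * 10 * 4 / 2) - 4, 1), 4 * 4) == 16
# additional partitions (capped by the partitions that remain).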
def first(self: "RDD[T]") -> T:
"""
Return the first element in this RDD.
Examples
--------
>>> sc.parallelize([2, 3, 4]).first()
2
>>> sc.parallelize([]).first()
Traceback (most recent call last):
...
ValueError: RDD is empty
"""
rs = self.take(1)
if rs:
return rs[0]
raise ValueError("RDD is empty")
def isEmpty(self) -> bool:
"""
Returns true if and only if the RDD contains no elements at all.
Notes
-----
An RDD may be empty even when it has at least 1 partition.
Examples
--------
>>> sc.parallelize([]).isEmpty()
True
>>> sc.parallelize([1]).isEmpty()
False
"""
return self.getNumPartitions() == 0 or len(self.take(1)) == 0
def saveAsNewAPIHadoopDataset(
self: "RDD[Tuple[K, V]]",
conf: Dict[str, str],
keyConverter: Optional[str] = None,
valueConverter: Optional[str] = None,
) -> None:
"""
Output a Python RDD of key-value pairs (of form ``RDD[(K, V)]``) to any Hadoop file
system, using the new Hadoop OutputFormat API (mapreduce package). Keys/values are
converted for output using either user specified converters or, by default,
"org.apache.spark.api.python.JavaToWritableConverter".
Parameters
----------
conf : dict
Hadoop job configuration
keyConverter : str, optional
fully qualified classname of key converter (None by default)
valueConverter : str, optional
fully qualified classname of value converter (None by default)
"""
jconf = self.ctx._dictToJavaMap(conf)
pickledRDD = self._pickled()
assert self.ctx._jvm is not None
self.ctx._jvm.PythonRDD.saveAsHadoopDataset(
pickledRDD._jrdd, True, jconf, keyConverter, valueConverter, True
)
def saveAsNewAPIHadoopFile(
self: "RDD[Tuple[K, V]]",
path: str,
outputFormatClass: str,
keyClass: Optional[str] = None,
valueClass: Optional[str] = None,
keyConverter: Optional[str] = None,
valueConverter: Optional[str] = None,
conf: Optional[Dict[str, str]] = None,
) -> None:
"""
Output a Python RDD of key-value pairs (of form ``RDD[(K, V)]``) to any Hadoop file
system, using the new Hadoop OutputFormat API (mapreduce package). Key and value types
will be inferred if not specified. Keys and values are converted for output using either
user specified converters or "org.apache.spark.api.python.JavaToWritableConverter". The
`conf` is applied on top of the base Hadoop conf associated with the SparkContext
of this RDD to create a merged Hadoop MapReduce job configuration for saving the data.
Parameters
----------
path : str
path to Hadoop file
outputFormatClass : str
fully qualified classname of Hadoop OutputFormat
(e.g. "org.apache.hadoop.mapreduce.lib.output.SequenceFileOutputFormat")
keyClass : str, optional
fully qualified classname of key Writable class
(e.g. "org.apache.hadoop.io.IntWritable", None by default)
valueClass : str, optional
fully qualified classname of value Writable class
(e.g. "org.apache.hadoop.io.Text", None by default)
keyConverter : str, optional
fully qualified classname of key converter (None by default)
valueConverter : str, optional
fully qualified classname of value converter (None by default)
conf : dict, optional
Hadoop job configuration (None by default)
"""
jconf = self.ctx._dictToJavaMap(conf)
pickledRDD = self._pickled()
assert self.ctx._jvm is not None
self.ctx._jvm.PythonRDD.saveAsNewAPIHadoopFile(
pickledRDD._jrdd,
True,
path,
outputFormatClass,
keyClass,
valueClass,
keyConverter,
valueConverter,
jconf,
)
def saveAsHadoopDataset(
self: "RDD[Tuple[K, V]]",
conf: Dict[str, str],
keyConverter: Optional[str] = None,
valueConverter: Optional[str] = None,
) -> None:
"""
Output a Python RDD of key-value pairs (of form ``RDD[(K, V)]``) to any Hadoop file
system, using the old Hadoop OutputFormat API (mapred package). Keys/values are
converted for output using either user specified converters or, by default,
"org.apache.spark.api.python.JavaToWritableConverter".
Parameters
----------
conf : dict
Hadoop job configuration
keyConverter : str, optional
fully qualified classname of key converter (None by default)
valueConverter : str, optional
fully qualified classname of value converter (None by default)
"""
jconf = self.ctx._dictToJavaMap(conf)
pickledRDD = self._pickled()
assert self.ctx._jvm is not None
self.ctx._jvm.PythonRDD.saveAsHadoopDataset(
pickledRDD._jrdd, True, jconf, keyConverter, valueConverter, False
)
def saveAsHadoopFile(
self: "RDD[Tuple[K, V]]",
path: str,
outputFormatClass: str,
keyClass: Optional[str] = None,
valueClass: Optional[str] = None,
keyConverter: Optional[str] = None,
valueConverter: Optional[str] = None,
conf: Optional[Dict[str, str]] = None,
compressionCodecClass: Optional[str] = None,
) -> None:
"""
Output a Python RDD of key-value pairs (of form ``RDD[(K, V)]``) to any Hadoop file
system, using the old Hadoop OutputFormat API (mapred package). Key and value types
will be inferred if not specified. Keys and values are converted for output using either
user specified converters or "org.apache.spark.api.python.JavaToWritableConverter". The
`conf` is applied on top of the base Hadoop conf associated with the SparkContext
of this RDD to create a merged Hadoop MapReduce job configuration for saving the data.
Parameters
----------
path : str
path to Hadoop file
outputFormatClass : str
fully qualified classname of Hadoop OutputFormat
(e.g. "org.apache.hadoop.mapred.SequenceFileOutputFormat")
keyClass : str, optional
fully qualified classname of key Writable class
(e.g. "org.apache.hadoop.io.IntWritable", None by default)
valueClass : str, optional
fully qualified classname of value Writable class
(e.g. "org.apache.hadoop.io.Text", None by default)
keyConverter : str, optional
fully qualified classname of key converter (None by default)
valueConverter : str, optional
fully qualified classname of value converter (None by default)
conf : dict, optional
Hadoop job configuration (None by default)
compressionCodecClass : str, optional
fully qualified classname of the compression codec class
i.e. "org.apache.hadoop.io.compress.GzipCodec" (None by default)
"""
jconf = self.ctx._dictToJavaMap(conf)
pickledRDD = self._pickled()
assert self.ctx._jvm is not None
self.ctx._jvm.PythonRDD.saveAsHadoopFile(
pickledRDD._jrdd,
True,
path,
outputFormatClass,
keyClass,
valueClass,
keyConverter,
valueConverter,
jconf,
compressionCodecClass,
)
def saveAsSequenceFile(
self: "RDD[Tuple[K, V]]", path: str, compressionCodecClass: Optional[str] = None
) -> None:
"""
Output a Python RDD of key-value pairs (of form ``RDD[(K, V)]``) to any Hadoop file
system, using the "org.apache.hadoop.io.Writable" types that we convert from the
RDD's key and value types. The mechanism is as follows:
1. Pickle is used to convert the pickled Python RDD into an RDD of Java objects.
2. Keys and values of this Java RDD are converted to Writables and written out.
Parameters
----------
path : str
path to sequence file
compressionCodecClass : str, optional
fully qualified classname of the compression codec class
i.e. "org.apache.hadoop.io.compress.GzipCodec" (None by default)
"""
pickledRDD = self._pickled()
assert self.ctx._jvm is not None
self.ctx._jvm.PythonRDD.saveAsSequenceFile(
pickledRDD._jrdd, True, path, compressionCodecClass
)
def saveAsPickleFile(self, path: str, batchSize: int = 10) -> None:
"""
Save this RDD as a SequenceFile of serialized objects. The serializer
used is :class:`pyspark.serializers.CPickleSerializer`, default batch size
is 10.
Examples
--------
>>> from tempfile import NamedTemporaryFile
>>> tmpFile = NamedTemporaryFile(delete=True)
>>> tmpFile.close()
>>> sc.parallelize([1, 2, 'spark', 'rdd']).saveAsPickleFile(tmpFile.name, 3)
>>> sorted(sc.pickleFile(tmpFile.name, 5).map(str).collect())
['1', '2', 'rdd', 'spark']
"""
ser: Serializer
if batchSize == 0:
ser = AutoBatchedSerializer(CPickleSerializer())
else:
ser = BatchedSerializer(CPickleSerializer(), batchSize)
self._reserialize(ser)._jrdd.saveAsObjectFile(path)
def saveAsTextFile(self, path: str, compressionCodecClass: Optional[str] = None) -> None:
"""
Save this RDD as a text file, using string representations of elements.
Parameters
----------
path : str
path to text file
compressionCodecClass : str, optional
fully qualified classname of the compression codec class
i.e. "org.apache.hadoop.io.compress.GzipCodec" (None by default)
Examples
--------
>>> from tempfile import NamedTemporaryFile
>>> tempFile = NamedTemporaryFile(delete=True)
>>> tempFile.close()
>>> sc.parallelize(range(10)).saveAsTextFile(tempFile.name)
>>> from fileinput import input
>>> from glob import glob
>>> ''.join(sorted(input(glob(tempFile.name + "/part-0000*"))))
'0\\n1\\n2\\n3\\n4\\n5\\n6\\n7\\n8\\n9\\n'
Empty lines are tolerated when saving to text files.
>>> from tempfile import NamedTemporaryFile
>>> tempFile2 = NamedTemporaryFile(delete=True)
>>> tempFile2.close()
>>> sc.parallelize(['', 'foo', '', 'bar', '']).saveAsTextFile(tempFile2.name)
>>> ''.join(sorted(input(glob(tempFile2.name + "/part-0000*"))))
'\\n\\n\\nbar\\nfoo\\n'
Using compressionCodecClass
>>> from tempfile import NamedTemporaryFile
>>> tempFile3 = NamedTemporaryFile(delete=True)
>>> tempFile3.close()
>>> codec = "org.apache.hadoop.io.compress.GzipCodec"
>>> sc.parallelize(['foo', 'bar']).saveAsTextFile(tempFile3.name, codec)
>>> from fileinput import input, hook_compressed
>>> result = sorted(input(glob(tempFile3.name + "/part*.gz"), openhook=hook_compressed))
>>> ''.join([r.decode('utf-8') if isinstance(r, bytes) else r for r in result])
'bar\\nfoo\\n'
"""
def func(split: int, iterator: Iterable[Any]) -> Iterable[bytes]:
for x in iterator:
if isinstance(x, bytes):
yield x
elif isinstance(x, str):
yield x.encode("utf-8")
else:
yield str(x).encode("utf-8")
keyed = self.mapPartitionsWithIndex(func)
keyed._bypass_serializer = True # type: ignore[attr-defined]
assert self.ctx._jvm is not None
if compressionCodecClass:
compressionCodec = self.ctx._jvm.java.lang.Class.forName(compressionCodecClass)
keyed._jrdd.map(self.ctx._jvm.BytesToString()).saveAsTextFile(path, compressionCodec)
else:
keyed._jrdd.map(self.ctx._jvm.BytesToString()).saveAsTextFile(path)
# Pair functions
def collectAsMap(self: "RDD[Tuple[K, V]]") -> Dict[K, V]:
"""
Return the key-value pairs in this RDD to the master as a dictionary.
Notes
-----
This method should only be used if the resulting data is expected
to be small, as all the data is loaded into the driver's memory.
Examples
--------
>>> m = sc.parallelize([(1, 2), (3, 4)]).collectAsMap()
>>> m[1]
2
>>> m[3]
4
"""
return dict(self.collect())
def keys(self: "RDD[Tuple[K, V]]") -> "RDD[K]":
"""
Return an RDD with the keys of each tuple.
Examples
--------
>>> m = sc.parallelize([(1, 2), (3, 4)]).keys()
>>> m.collect()
[1, 3]
"""
return self.map(lambda x: x[0])
def values(self: "RDD[Tuple[K, V]]") -> "RDD[V]":
"""
Return an RDD with the values of each tuple.
Examples
--------
>>> m = sc.parallelize([(1, 2), (3, 4)]).values()
>>> m.collect()
[2, 4]
"""
return self.map(lambda x: x[1])
def reduceByKey(
self: "RDD[Tuple[K, V]]",
func: Callable[[V, V], V],
numPartitions: Optional[int] = None,
partitionFunc: Callable[[K], int] = portable_hash,
) -> "RDD[Tuple[K, V]]":
"""
Merge the values for each key using an associative and commutative reduce function.
This will also perform the merging locally on each mapper before
sending results to a reducer, similarly to a "combiner" in MapReduce.
Output will be partitioned with `numPartitions` partitions, or
the default parallelism level if `numPartitions` is not specified.
The default partitioner is hash-partitioning.
Examples
--------
>>> from operator import add
>>> rdd = sc.parallelize([("a", 1), ("b", 1), ("a", 1)])
>>> sorted(rdd.reduceByKey(add).collect())
[('a', 2), ('b', 1)]
"""
return self.combineByKey(lambda x: x, func, func, numPartitions, partitionFunc)
def reduceByKeyLocally(self: "RDD[Tuple[K, V]]", func: Callable[[V, V], V]) -> Dict[K, V]:
"""
Merge the values for each key using an associative and commutative reduce function, but
return the results immediately to the master as a dictionary.
This will also perform the merging locally on each mapper before
sending results to a reducer, similarly to a "combiner" in MapReduce.
Examples
--------
>>> from operator import add
>>> rdd = sc.parallelize([("a", 1), ("b", 1), ("a", 1)])
>>> sorted(rdd.reduceByKeyLocally(add).items())
[('a', 2), ('b', 1)]
"""
func = fail_on_stopiteration(func)
def reducePartition(iterator: Iterable[Tuple[K, V]]) -> Iterable[Dict[K, V]]:
m: Dict[K, V] = {}
for k, v in iterator:
m[k] = func(m[k], v) if k in m else v
yield m
def mergeMaps(m1: Dict[K, V], m2: Dict[K, V]) -> Dict[K, V]:
for k, v in m2.items():
m1[k] = func(m1[k], v) if k in m1 else v
return m1
return self.mapPartitions(reducePartition).reduce(mergeMaps)
def countByKey(self: "RDD[Tuple[K, V]]") -> Dict[K, int]:
"""
Count the number of elements for each key, and return the result to the
master as a dictionary.
Examples
--------
>>> rdd = sc.parallelize([("a", 1), ("b", 1), ("a", 1)])
>>> sorted(rdd.countByKey().items())
[('a', 2), ('b', 1)]
"""
return self.map(lambda x: x[0]).countByValue()
def join(
self: "RDD[Tuple[K, V]]",
other: "RDD[Tuple[K, U]]",
numPartitions: Optional[int] = None,
) -> "RDD[Tuple[K, Tuple[V, U]]]":
"""
Return an RDD containing all pairs of elements with matching keys in
`self` and `other`.
Each pair of elements will be returned as a (k, (v1, v2)) tuple, where
(k, v1) is in `self` and (k, v2) is in `other`.
Performs a hash join across the cluster.
Examples
--------
>>> x = sc.parallelize([("a", 1), ("b", 4)])
>>> y = sc.parallelize([("a", 2), ("a", 3)])
>>> sorted(x.join(y).collect())
[('a', (1, 2)), ('a', (1, 3))]
"""
return python_join(self, other, numPartitions)
def leftOuterJoin(
self: "RDD[Tuple[K, V]]",
other: "RDD[Tuple[K, U]]",
numPartitions: Optional[int] = None,
) -> "RDD[Tuple[K, Tuple[V, Optional[U]]]]":
"""
Perform a left outer join of `self` and `other`.
For each element (k, v) in `self`, the resulting RDD will either
contain all pairs (k, (v, w)) for w in `other`, or the pair
(k, (v, None)) if no elements in `other` have key k.
Hash-partitions the resulting RDD into the given number of partitions.
Examples
--------
>>> x = sc.parallelize([("a", 1), ("b", 4)])
>>> y = sc.parallelize([("a", 2)])
>>> sorted(x.leftOuterJoin(y).collect())
[('a', (1, 2)), ('b', (4, None))]
"""
return python_left_outer_join(self, other, numPartitions)
def rightOuterJoin(
self: "RDD[Tuple[K, V]]",
other: "RDD[Tuple[K, U]]",
numPartitions: Optional[int] = None,
) -> "RDD[Tuple[K, Tuple[Optional[V], U]]]":
"""
Perform a right outer join of `self` and `other`.
For each element (k, w) in `other`, the resulting RDD will either
contain all pairs (k, (v, w)) for v in this, or the pair (k, (None, w))
if no elements in `self` have key k.
Hash-partitions the resulting RDD into the given number of partitions.
Examples
--------
>>> x = sc.parallelize([("a", 1), ("b", 4)])
>>> y = sc.parallelize([("a", 2)])
>>> sorted(y.rightOuterJoin(x).collect())
[('a', (2, 1)), ('b', (None, 4))]
"""
return python_right_outer_join(self, other, numPartitions)
def fullOuterJoin(
self: "RDD[Tuple[K, V]]",
other: "RDD[Tuple[K, U]]",
numPartitions: Optional[int] = None,
) -> "RDD[Tuple[K, Tuple[Optional[V], Optional[U]]]]":
"""
Perform a full outer join of `self` and `other`.
For each element (k, v) in `self`, the resulting RDD will either
contain all pairs (k, (v, w)) for w in `other`, or the pair
(k, (v, None)) if no elements in `other` have key k.
Similarly, for each element (k, w) in `other`, the resulting RDD will
either contain all pairs (k, (v, w)) for v in `self`, or the pair
(k, (None, w)) if no elements in `self` have key k.
Hash-partitions the resulting RDD into the given number of partitions.
Examples
--------
>>> x = sc.parallelize([("a", 1), ("b", 4)])
>>> y = sc.parallelize([("a", 2), ("c", 8)])
>>> sorted(x.fullOuterJoin(y).collect())
[('a', (1, 2)), ('b', (4, None)), ('c', (None, 8))]
"""
return python_full_outer_join(self, other, numPartitions)
# TODO: add option to control map-side combining
# portable_hash is used as default, because the builtin hash of None differs
# across machines.
def partitionBy(
self: "RDD[Tuple[K, V]]",
numPartitions: Optional[int],
partitionFunc: Callable[[K], int] = portable_hash,
) -> "RDD[Tuple[K, V]]":
"""
Return a copy of the RDD partitioned using the specified partitioner.
Examples
--------
>>> pairs = sc.parallelize([1, 2, 3, 4, 2, 4, 1]).map(lambda x: (x, x))
>>> sets = pairs.partitionBy(2).glom().collect()
>>> len(set(sets[0]).intersection(set(sets[1])))
0
"""
if numPartitions is None:
numPartitions = self._defaultReducePartitions()
partitioner = Partitioner(numPartitions, partitionFunc)
if self.partitioner == partitioner:
return self
# Transferring O(n) objects to Java is too expensive.
# Instead, we'll form the hash buckets in Python,
# transferring O(numPartitions) objects to Java.
# Each object is a (splitNumber, [objects]) pair.
# In order to avoid overly large objects, the objects are
# grouped into chunks.
outputSerializer = self.ctx._unbatched_serializer
limit = self._memory_limit() / 2
def add_shuffle_key(split: int, iterator: Iterable[Tuple[K, V]]) -> Iterable[bytes]:
buckets = defaultdict(list)
c, batch = 0, min(10 * numPartitions, 1000) # type: ignore[operator]
for k, v in iterator:
buckets[partitionFunc(k) % numPartitions].append((k, v)) # type: ignore[operator]
c += 1
# check used memory and avg size of chunk of objects
if c % 1000 == 0 and get_used_memory() > limit or c > batch:
n, size = len(buckets), 0
for split in list(buckets.keys()):
yield pack_long(split)
d = outputSerializer.dumps(buckets[split])
del buckets[split]
yield d
size += len(d)
avg = int(size / n) >> 20
# let 1M < avg < 10M
if avg < 1:
batch = min(sys.maxsize, batch * 1.5) # type: ignore[assignment]
elif avg > 10:
batch = max(int(batch / 1.5), 1)
c = 0
for split, items in buckets.items():
yield pack_long(split)
yield outputSerializer.dumps(items)
keyed = self.mapPartitionsWithIndex(add_shuffle_key, preservesPartitioning=True)
keyed._bypass_serializer = True # type: ignore[attr-defined]
assert self.ctx._jvm is not None
with SCCallSiteSync(self.context):
pairRDD = self.ctx._jvm.PairwiseRDD(keyed._jrdd.rdd()).asJavaPairRDD()
jpartitioner = self.ctx._jvm.PythonPartitioner(numPartitions, id(partitionFunc))
jrdd = self.ctx._jvm.PythonRDD.valueOfPair(pairRDD.partitionBy(jpartitioner))
rdd: "RDD[Tuple[K, V]]" = RDD(jrdd, self.ctx, BatchedSerializer(outputSerializer))
rdd.partitioner = partitioner
return rdd
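# Added commentary (illustrative): add_shuffle_key() streams each partition to
# the JVM as an alternating sequence of pack_long(partition_id) and a pickled
# chunk of (key, value) pairs, so only O(numPartitions) objects cross the
# Python/Java boundary per flush. The chunk size is adapted so the average
# serialized chunk stays roughly between 1 MB and 10 MB.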
# TODO: add control over map-side aggregation
def combineByKey(
self: "RDD[Tuple[K, V]]",
createCombiner: Callable[[V], U],
mergeValue: Callable[[U, V], U],
mergeCombiners: Callable[[U, U], U],
numPartitions: Optional[int] = None,
partitionFunc: Callable[[K], int] = portable_hash,
) -> "RDD[Tuple[K, U]]":
"""
Generic function to combine the elements for each key using a custom
set of aggregation functions.
Turns an RDD[(K, V)] into a result of type RDD[(K, C)], for a "combined
type" C.
Users provide three functions:
- `createCombiner`, which turns a V into a C (e.g., creates
a one-element list)
- `mergeValue`, to merge a V into a C (e.g., adds it to the end of
a list)
- `mergeCombiners`, to combine two C's into a single one (e.g., merges
the lists)
To avoid memory allocation, both mergeValue and mergeCombiners are allowed to
modify and return their first argument instead of creating a new C.
In addition, users can control the partitioning of the output RDD.
Notes
-----
V and C can be different -- for example, one might group an RDD of type
(Int, Int) into an RDD of type (Int, List[Int]).
Examples
--------
>>> x = sc.parallelize([("a", 1), ("b", 1), ("a", 2)])
>>> def to_list(a):
... return [a]
...
>>> def append(a, b):
... a.append(b)
... return a
...
>>> def extend(a, b):
... a.extend(b)
... return a
...
>>> sorted(x.combineByKey(to_list, append, extend).collect())
[('a', [1, 2]), ('b', [1])]
"""
if numPartitions is None:
numPartitions = self._defaultReducePartitions()
serializer = self.ctx.serializer
memory = self._memory_limit()
agg = Aggregator(createCombiner, mergeValue, mergeCombiners)
def combineLocally(iterator: Iterable[Tuple[K, V]]) -> Iterable[Tuple[K, U]]:
merger = ExternalMerger(agg, memory * 0.9, serializer)
merger.mergeValues(iterator)
return merger.items()
locally_combined = self.mapPartitions(combineLocally, preservesPartitioning=True)
shuffled = locally_combined.partitionBy(numPartitions, partitionFunc)
def _mergeCombiners(iterator: Iterable[Tuple[K, U]]) -> Iterable[Tuple[K, U]]:
merger = ExternalMerger(agg, memory, serializer)
merger.mergeCombiners(iterator)
return merger.items()
return shuffled.mapPartitions(_mergeCombiners, preservesPartitioning=True)
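# Added commentary (illustrative): combineByKey() is the building block behind
# reduceByKey and groupByKey. A pure-Python sketch of the map-side phase, with
# `pairs` standing in for one partition's (key, value) stream:
#
#     combined = {}
#     for k, v in pairs:
#         combined[k] = (createCombiner(v) if k not in combined
#                        else mergeValue(combined[k], v))
#     # after the shuffle, maps from different partitions are merged with
#     # mergeCombiners(c1, c2)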
def aggregateByKey(
self: "RDD[Tuple[K, V]]",
zeroValue: U,
seqFunc: Callable[[U, V], U],
combFunc: Callable[[U, U], U],
numPartitions: Optional[int] = None,
partitionFunc: Callable[[K], int] = portable_hash,
) -> "RDD[Tuple[K, U]]":
"""
Aggregate the values of each key, using given combine functions and a neutral
"zero value". This function can return a different result type, U, than the type
of the values in this RDD, V. Thus, we need one operation for merging a V into
a U and one operation for merging two U's. The former operation is used for merging
values within a partition, and the latter is used for merging values between
partitions. To avoid memory allocation, both of these functions are
allowed to modify and return their first argument instead of creating a new U.
"""
def createZero() -> U:
return copy.deepcopy(zeroValue)
return self.combineByKey(
lambda v: seqFunc(createZero(), v), seqFunc, combFunc, numPartitions, partitionFunc
)
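# Added commentary (hedged usage sketch, assumes an active SparkContext `sc`):
#
#     rdd = sc.parallelize([("a", 1), ("a", 2), ("b", 3)])
#     seqFunc = lambda acc, v: (acc[0] + v, acc[1] + 1)    # fold a V into (sum, count)
#     combFunc = lambda a, b: (a[0] + b[0], a[1] + b[1])   # merge two (sum, count)
#     sorted(rdd.aggregateByKey((0, 0), seqFunc, combFunc).collect())
#     # -> [('a', (3, 2)), ('b', (3, 1))]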
def foldByKey(
self: "RDD[Tuple[K, V]]",
zeroValue: V,
func: Callable[[V, V], V],
numPartitions: Optional[int] = None,
partitionFunc: Callable[[K], int] = portable_hash,
) -> "RDD[Tuple[K, V]]":
"""
Merge the values for each key using an associative function "func"
and a neutral "zeroValue" which may be added to the result an
arbitrary number of times, and must not change the result
(e.g., 0 for addition, or 1 for multiplication).
Examples
--------
>>> rdd = sc.parallelize([("a", 1), ("b", 1), ("a", 1)])
>>> from operator import add
>>> sorted(rdd.foldByKey(0, add).collect())
[('a', 2), ('b', 1)]
"""
def createZero() -> V:
return copy.deepcopy(zeroValue)
return self.combineByKey(
lambda v: func(createZero(), v), func, func, numPartitions, partitionFunc
)
def _memory_limit(self) -> int:
return _parse_memory(self.ctx._conf.get("spark.python.worker.memory", "512m"))
# TODO: support variant with custom partitioner
def groupByKey(
self: "RDD[Tuple[K, V]]",
numPartitions: Optional[int] = None,
partitionFunc: Callable[[K], int] = portable_hash,
) -> "RDD[Tuple[K, Iterable[V]]]":
"""
Group the values for each key in the RDD into a single sequence.
Hash-partitions the resulting RDD with numPartitions partitions.
Notes
-----
If you are grouping in order to perform an aggregation (such as a
sum or average) over each key, using reduceByKey or aggregateByKey will
provide much better performance.
Examples
--------
>>> rdd = sc.parallelize([("a", 1), ("b", 1), ("a", 1)])
>>> sorted(rdd.groupByKey().mapValues(len).collect())
[('a', 2), ('b', 1)]
>>> sorted(rdd.groupByKey().mapValues(list).collect())
[('a', [1, 1]), ('b', [1])]
"""
def createCombiner(x: V) -> List[V]:
return [x]
def mergeValue(xs: List[V], x: V) -> List[V]:
xs.append(x)
return xs
def mergeCombiners(a: List[V], b: List[V]) -> List[V]:
a.extend(b)
return a
memory = self._memory_limit()
serializer = self._jrdd_deserializer
agg = Aggregator(createCombiner, mergeValue, mergeCombiners)
def combine(iterator: Iterable[Tuple[K, V]]) -> Iterable[Tuple[K, List[V]]]:
merger = ExternalMerger(agg, memory * 0.9, serializer)
merger.mergeValues(iterator)
return merger.items()
locally_combined = self.mapPartitions(combine, preservesPartitioning=True)
shuffled = locally_combined.partitionBy(numPartitions, partitionFunc)
def groupByKey(it: Iterable[Tuple[K, List[V]]]) -> Iterable[Tuple[K, List[V]]]:
merger = ExternalGroupBy(agg, memory, serializer)
merger.mergeCombiners(it)
return merger.items()
return shuffled.mapPartitions(groupByKey, True).mapValues(ResultIterable)
def flatMapValues(
self: "RDD[Tuple[K, V]]", f: Callable[[V], Iterable[U]]
) -> "RDD[Tuple[K, U]]":
"""
Pass each value in the key-value pair RDD through a flatMap function
without changing the keys; this also retains the original RDD's
partitioning.
Examples
--------
>>> x = sc.parallelize([("a", ["x", "y", "z"]), ("b", ["p", "r"])])
>>> def f(x): return x
>>> x.flatMapValues(f).collect()
[('a', 'x'), ('a', 'y'), ('a', 'z'), ('b', 'p'), ('b', 'r')]
"""
def flat_map_fn(kv: Tuple[K, V]) -> Iterable[Tuple[K, U]]:
return ((kv[0], x) for x in f(kv[1]))
return self.flatMap(flat_map_fn, preservesPartitioning=True)
def mapValues(self: "RDD[Tuple[K, V]]", f: Callable[[V], U]) -> "RDD[Tuple[K, U]]":
"""
Pass each value in the key-value pair RDD through a map function
without changing the keys; this also retains the original RDD's
partitioning.
Examples
--------
>>> x = sc.parallelize([("a", ["apple", "banana", "lemon"]), ("b", ["grapes"])])
>>> def f(x): return len(x)
>>> x.mapValues(f).collect()
[('a', 3), ('b', 1)]
"""
def map_values_fn(kv: Tuple[K, V]) -> Tuple[K, U]:
return kv[0], f(kv[1])
return self.map(map_values_fn, preservesPartitioning=True)
@overload
def groupWith(
self: "RDD[Tuple[K, V]]", other: "RDD[Tuple[K, V1]]"
) -> "RDD[Tuple[K, Tuple[ResultIterable[V], ResultIterable[V1]]]]":
...
@overload
def groupWith(
self: "RDD[Tuple[K, V]]", other: "RDD[Tuple[K, V1]]", __o1: "RDD[Tuple[K, V2]]"
) -> "RDD[Tuple[K, Tuple[ResultIterable[V], ResultIterable[V1], ResultIterable[V2]]]]":
...
@overload
def groupWith(
self: "RDD[Tuple[K, V]]",
other: "RDD[Tuple[K, V1]]",
_o1: "RDD[Tuple[K, V2]]",
_o2: "RDD[Tuple[K, V3]]",
) -> """RDD[
Tuple[
K,
Tuple[
ResultIterable[V],
ResultIterable[V1],
ResultIterable[V2],
ResultIterable[V3],
],
]
]""":
...
def groupWith( # type: ignore[misc]
self: "RDD[Tuple[Any, Any]]", other: "RDD[Tuple[Any, Any]]", *others: "RDD[Tuple[Any, Any]]"
) -> "RDD[Tuple[Any, Tuple[ResultIterable[Any], ...]]]":
"""
Alias for cogroup but with support for multiple RDDs.
Examples
--------
>>> w = sc.parallelize([("a", 5), ("b", 6)])
>>> x = sc.parallelize([("a", 1), ("b", 4)])
>>> y = sc.parallelize([("a", 2)])
>>> z = sc.parallelize([("b", 42)])
>>> [(x, tuple(map(list, y))) for x, y in sorted(list(w.groupWith(x, y, z).collect()))]
[('a', ([5], [1], [2], [])), ('b', ([6], [4], [], [42]))]
"""
return python_cogroup((self, other) + others, numPartitions=None)
# TODO: add variant with custom partitioner
def cogroup(
self: "RDD[Tuple[K, V]]",
other: "RDD[Tuple[K, U]]",
numPartitions: Optional[int] = None,
) -> "RDD[Tuple[K, Tuple[ResultIterable[V], ResultIterable[U]]]]":
"""
For each key k in `self` or `other`, return a resulting RDD that
contains a tuple with the list of values for that key in `self` as
well as `other`.
Examples
--------
>>> x = sc.parallelize([("a", 1), ("b", 4)])
>>> y = sc.parallelize([("a", 2)])
>>> [(x, tuple(map(list, y))) for x, y in sorted(list(x.cogroup(y).collect()))]
[('a', ([1], [2])), ('b', ([4], []))]
"""
return python_cogroup((self, other), numPartitions)
def sampleByKey(
self: "RDD[Tuple[K, V]]",
withReplacement: bool,
fractions: Dict[K, Union[float, int]],
seed: Optional[int] = None,
) -> "RDD[Tuple[K, V]]":
"""
Return a subset of this RDD sampled by key (via stratified sampling).
Create a sample of this RDD using variable sampling rates for
different keys as specified by fractions, a key to sampling rate map.
Examples
--------
>>> fractions = {"a": 0.2, "b": 0.1}
>>> rdd = sc.parallelize(fractions.keys()).cartesian(sc.parallelize(range(0, 1000)))
>>> sample = dict(rdd.sampleByKey(False, fractions, 2).groupByKey().collect())
>>> 100 < len(sample["a"]) < 300 and 50 < len(sample["b"]) < 150
True
>>> max(sample["a"]) <= 999 and min(sample["a"]) >= 0
True
>>> max(sample["b"]) <= 999 and min(sample["b"]) >= 0
True
"""
for fraction in fractions.values():
assert fraction >= 0.0, "Negative fraction value: %s" % fraction
return self.mapPartitionsWithIndex(
RDDStratifiedSampler(withReplacement, fractions, seed).func, True
)
def subtractByKey(
self: "RDD[Tuple[K, V]]",
other: "RDD[Tuple[K, Any]]",
numPartitions: Optional[int] = None,
) -> "RDD[Tuple[K, V]]":
"""
Return each (key, value) pair in `self` that has no pair with matching
key in `other`.
Examples
--------
>>> x = sc.parallelize([("a", 1), ("b", 4), ("b", 5), ("a", 2)])
>>> y = sc.parallelize([("a", 3), ("c", None)])
>>> sorted(x.subtractByKey(y).collect())
[('b', 4), ('b', 5)]
"""
def filter_func(pair: Tuple[K, Tuple[V, Any]]) -> bool:
key, (val1, val2) = pair
return val1 and not val2 # type: ignore[return-value]
return (
self.cogroup(other, numPartitions)
.filter(filter_func) # type: ignore[arg-type]
.flatMapValues(lambda x: x[0])
)
def subtract(self: "RDD[T]", other: "RDD[T]", numPartitions: Optional[int] = None) -> "RDD[T]":
"""
Return each value in `self` that is not contained in `other`.
Examples
--------
>>> x = sc.parallelize([("a", 1), ("b", 4), ("b", 5), ("a", 3)])
>>> y = sc.parallelize([("a", 3), ("c", None)])
>>> sorted(x.subtract(y).collect())
[('a', 1), ('b', 4), ('b', 5)]
"""
# note: here 'True' is just a placeholder
rdd = other.map(lambda x: (x, True))
return self.map(lambda x: (x, True)).subtractByKey(rdd, numPartitions).keys()
def keyBy(self: "RDD[T]", f: Callable[[T], K]) -> "RDD[Tuple[K, T]]":
"""
Creates tuples of the elements in this RDD by applying `f`.
Examples
--------
>>> x = sc.parallelize(range(0,3)).keyBy(lambda x: x*x)
>>> y = sc.parallelize(zip(range(0,5), range(0,5)))
>>> [(x, list(map(list, y))) for x, y in sorted(x.cogroup(y).collect())]
[(0, [[0], [0]]), (1, [[1], [1]]), (2, [[], [2]]), (3, [[], [3]]), (4, [[2], [4]])]
"""
return self.map(lambda x: (f(x), x))
def repartition(self: "RDD[T]", numPartitions: int) -> "RDD[T]":
"""
Return a new RDD that has exactly numPartitions partitions.
Can increase or decrease the level of parallelism in this RDD.
Internally, this uses a shuffle to redistribute data.
If you are decreasing the number of partitions in this RDD, consider
using `coalesce`, which can avoid performing a shuffle.
Examples
--------
>>> rdd = sc.parallelize([1,2,3,4,5,6,7], 4)
>>> sorted(rdd.glom().collect())
[[1], [2, 3], [4, 5], [6, 7]]
>>> len(rdd.repartition(2).glom().collect())
2
>>> len(rdd.repartition(10).glom().collect())
10
"""
return self.coalesce(numPartitions, shuffle=True)
def coalesce(self: "RDD[T]", numPartitions: int, shuffle: bool = False) -> "RDD[T]":
"""
Return a new RDD that is reduced into `numPartitions` partitions.
Examples
--------
>>> sc.parallelize([1, 2, 3, 4, 5], 3).glom().collect()
[[1], [2, 3], [4, 5]]
>>> sc.parallelize([1, 2, 3, 4, 5], 3).coalesce(1).glom().collect()
[[1, 2, 3, 4, 5]]
"""
if shuffle:
# Decrease the batch size in order to distribute the elements evenly across output
# partitions. Otherwise, repartition may produce highly skewed partitions.
batchSize = min(10, self.ctx._batchSize or 1024)
ser = BatchedSerializer(CPickleSerializer(), batchSize)
selfCopy = self._reserialize(ser)
jrdd_deserializer = selfCopy._jrdd_deserializer
jrdd = selfCopy._jrdd.coalesce(numPartitions, shuffle)
else:
jrdd_deserializer = self._jrdd_deserializer
jrdd = self._jrdd.coalesce(numPartitions, shuffle)
return RDD(jrdd, self.ctx, jrdd_deserializer)
def zip(self: "RDD[T]", other: "RDD[U]") -> "RDD[Tuple[T, U]]":
"""
Zips this RDD with another one, returning key-value pairs made of the
first element in each RDD, the second element in each RDD, and so on. Assumes
that the two RDDs have the same number of partitions and the same
number of elements in each partition (e.g. one was made through
a map on the other).
Examples
--------
>>> x = sc.parallelize(range(0,5))
>>> y = sc.parallelize(range(1000, 1005))
>>> x.zip(y).collect()
[(0, 1000), (1, 1001), (2, 1002), (3, 1003), (4, 1004)]
"""
def get_batch_size(ser: Serializer) -> int:
if isinstance(ser, BatchedSerializer):
return ser.batchSize
return 1 # not batched
def batch_as(rdd: "RDD[V]", batchSize: int) -> "RDD[V]":
return rdd._reserialize(BatchedSerializer(CPickleSerializer(), batchSize))
my_batch = get_batch_size(self._jrdd_deserializer)
other_batch = get_batch_size(other._jrdd_deserializer)
if my_batch != other_batch or not my_batch:
# use the smallest batchSize for both of them
batchSize = min(my_batch, other_batch)
if batchSize <= 0:
# auto batched or unlimited
batchSize = 100
other = batch_as(other, batchSize)
self = batch_as(self, batchSize)
if self.getNumPartitions() != other.getNumPartitions():
raise ValueError("Can only zip with RDD which has the same number of partitions")
# The JVM will raise an exception if the corresponding partitions
# contain different numbers of items.
pairRDD = self._jrdd.zip(other._jrdd)
deserializer = PairDeserializer(self._jrdd_deserializer, other._jrdd_deserializer)
return RDD(pairRDD, self.ctx, deserializer)
def zipWithIndex(self: "RDD[T]") -> "RDD[Tuple[T, int]]":
"""
Zips this RDD with its element indices.
The ordering is first based on the partition index and then the
ordering of items within each partition. So the first item in
the first partition gets index 0, and the last item in the last
partition receives the largest index.
This method needs to trigger a spark job when this RDD contains
more than one partition.
Examples
--------
>>> sc.parallelize(["a", "b", "c", "d"], 3).zipWithIndex().collect()
[('a', 0), ('b', 1), ('c', 2), ('d', 3)]
"""
starts = [0]
if self.getNumPartitions() > 1:
nums = self.mapPartitions(lambda it: [sum(1 for i in it)]).collect()
for i in range(len(nums) - 1):
starts.append(starts[-1] + nums[i])
def func(k: int, it: Iterable[T]) -> Iterable[Tuple[T, int]]:
for i, v in enumerate(it, starts[k]):
yield v, i
return self.mapPartitionsWithIndex(func)
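# Added commentary (illustrative): zipWithIndex() first runs a small job that
# counts every partition, then turns those counts into per-partition starting
# offsets; e.g. partition sizes [2, 3, 1] give starts [0, 2, 5], and each
# element's index is its position within its partition plus that offset.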
def zipWithUniqueId(self: "RDD[T]") -> "RDD[Tuple[T, int]]":
"""
Zips this RDD with generated unique Long ids.
Items in the kth partition will get ids k, n+k, 2*n+k, ..., where
n is the number of partitions. So there may exist gaps, but this
method won't trigger a spark job, which is different from
:meth:`zipWithIndex`.
Examples
--------
>>> sc.parallelize(["a", "b", "c", "d", "e"], 3).zipWithUniqueId().collect()
[('a', 0), ('b', 1), ('c', 4), ('d', 2), ('e', 5)]
"""
n = self.getNumPartitions()
def func(k: int, it: Iterable[T]) -> Iterable[Tuple[T, int]]:
for i, v in enumerate(it):
yield v, i * n + k
return self.mapPartitionsWithIndex(func)
def name(self) -> Optional[str]:
"""
Return the name of this RDD.
"""
n = self._jrdd.name()
return n if n else None
def setName(self: "RDD[T]", name: str) -> "RDD[T]":
"""
Assign a name to this RDD.
Examples
--------
>>> rdd1 = sc.parallelize([1, 2])
>>> rdd1.setName('RDD1').name()
'RDD1'
"""
self._jrdd.setName(name)
return self
def toDebugString(self) -> Optional[bytes]:
"""
A description of this RDD and its recursive dependencies for debugging.
"""
debug_string = self._jrdd.toDebugString()
return debug_string.encode("utf-8") if debug_string else None
def getStorageLevel(self) -> StorageLevel:
"""
Get the RDD's current storage level.
Examples
--------
>>> rdd1 = sc.parallelize([1,2])
>>> rdd1.getStorageLevel()
StorageLevel(False, False, False, False, 1)
>>> print(rdd1.getStorageLevel())
Serialized 1x Replicated
"""
java_storage_level = self._jrdd.getStorageLevel()
storage_level = StorageLevel(
java_storage_level.useDisk(),
java_storage_level.useMemory(),
java_storage_level.useOffHeap(),
java_storage_level.deserialized(),
java_storage_level.replication(),
)
return storage_level
def _defaultReducePartitions(self) -> int:
"""
Returns the default number of partitions to use during reduce tasks (e.g., groupBy).
If spark.default.parallelism is set, then we'll use the value from SparkContext
defaultParallelism, otherwise we'll use the number of partitions in this RDD.
This mirrors the behavior of the Scala Partitioner#defaultPartitioner, intended to reduce
the likelihood of OOMs. Once PySpark adopts Partitioner-based APIs, this behavior will
be inherent.
"""
if self.ctx._conf.contains("spark.default.parallelism"):
return self.ctx.defaultParallelism
else:
return self.getNumPartitions()
def lookup(self: "RDD[Tuple[K, V]]", key: K) -> List[V]:
"""
Return the list of values in the RDD for key `key`. This operation
is done efficiently if the RDD has a known partitioner by only
searching the partition that the key maps to.
Examples
--------
>>> l = range(1000)
>>> rdd = sc.parallelize(zip(l, l), 10)
>>> rdd.lookup(42) # slow
[42]
>>> sorted = rdd.sortByKey()
>>> sorted.lookup(42) # fast
[42]
>>> sorted.lookup(1024)
[]
>>> rdd2 = sc.parallelize([(('a', 'b'), 'c')]).groupByKey()
>>> list(rdd2.lookup(('a', 'b'))[0])
['c']
"""
values = self.filter(lambda kv: kv[0] == key).values()
if self.partitioner is not None:
return self.ctx.runJob(values, lambda x: x, [self.partitioner(key)])
return values.collect()
def _to_java_object_rdd(self) -> "JavaObject":
"""Return a JavaRDD of Object by unpickling
It will convert each Python object into a Java object via Pickle, whether or
not the RDD is serialized in batches.
"""
rdd = self._pickled()
assert self.ctx._jvm is not None
return self.ctx._jvm.SerDeUtil.pythonToJava(rdd._jrdd, True)
def countApprox(self, timeout: int, confidence: float = 0.95) -> int:
"""
Approximate version of count() that returns a potentially incomplete
result within a timeout, even if not all tasks have finished.
Examples
--------
>>> rdd = sc.parallelize(range(1000), 10)
>>> rdd.countApprox(1000, 1.0)
1000
"""
drdd = self.mapPartitions(lambda it: [float(sum(1 for i in it))])
return int(drdd.sumApprox(timeout, confidence))
def sumApprox(
self: "RDD[Union[float, int]]", timeout: int, confidence: float = 0.95
) -> BoundedFloat:
"""
Approximate operation to return the sum within a timeout
or meet the confidence.
Examples
--------
>>> rdd = sc.parallelize(range(1000), 10)
>>> r = sum(range(1000))
>>> abs(rdd.sumApprox(1000) - r) / r < 0.05
True
"""
jrdd = self.mapPartitions(lambda it: [float(sum(it))])._to_java_object_rdd()
assert self.ctx._jvm is not None
jdrdd = self.ctx._jvm.JavaDoubleRDD.fromRDD(jrdd.rdd())
r = jdrdd.sumApprox(timeout, confidence).getFinalValue()
return BoundedFloat(r.mean(), r.confidence(), r.low(), r.high())
def meanApprox(
self: "RDD[Union[float, int]]", timeout: int, confidence: float = 0.95
) -> BoundedFloat:
"""
Approximate operation to return the mean within a timeout
or meet the confidence.
Examples
--------
>>> rdd = sc.parallelize(range(1000), 10)
>>> r = sum(range(1000)) / 1000.0
>>> abs(rdd.meanApprox(1000) - r) / r < 0.05
True
"""
jrdd = self.map(float)._to_java_object_rdd()
assert self.ctx._jvm is not None
jdrdd = self.ctx._jvm.JavaDoubleRDD.fromRDD(jrdd.rdd())
r = jdrdd.meanApprox(timeout, confidence).getFinalValue()
return BoundedFloat(r.mean(), r.confidence(), r.low(), r.high())
def countApproxDistinct(self: "RDD[T]", relativeSD: float = 0.05) -> int:
"""
Return approximate number of distinct elements in the RDD.
Parameters
----------
relativeSD : float, optional
Relative accuracy. Smaller values create
counters that require more space.
It must be greater than 0.000017.
Notes
-----
The algorithm used is based on streamlib's implementation of
`"HyperLogLog in Practice: Algorithmic Engineering of a State
of The Art Cardinality Estimation Algorithm", available here
<https://doi.org/10.1145/2452376.2452456>`_.
Examples
--------
>>> n = sc.parallelize(range(1000)).map(str).countApproxDistinct()
>>> 900 < n < 1100
True
>>> n = sc.parallelize([i % 20 for i in range(1000)]).countApproxDistinct()
>>> 16 < n < 24
True
"""
if relativeSD < 0.000017:
raise ValueError("relativeSD should be greater than 0.000017")
# the hash space in Java is 2^32
hashRDD = self.map(lambda x: portable_hash(x) & 0xFFFFFFFF)
return hashRDD._to_java_object_rdd().countApproxDistinct(relativeSD)
def toLocalIterator(self: "RDD[T]", prefetchPartitions: bool = False) -> Iterator[T]:
"""
Return an iterator that contains all of the elements in this RDD.
The iterator will consume as much memory as the largest partition in this RDD.
With prefetch it may consume up to the memory of the 2 largest partitions.
Parameters
----------
prefetchPartitions : bool, optional
Whether Spark should pre-fetch the next partition
before it is needed.
Examples
--------
>>> rdd = sc.parallelize(range(10))
>>> [x for x in rdd.toLocalIterator()]
[0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
"""
assert self.ctx._jvm is not None
with SCCallSiteSync(self.context):
sock_info = self.ctx._jvm.PythonRDD.toLocalIteratorAndServe(
self._jrdd.rdd(), prefetchPartitions
)
return _local_iterator_from_socket(sock_info, self._jrdd_deserializer)
def barrier(self: "RDD[T]") -> "RDDBarrier[T]":
"""
Marks the current stage as a barrier stage, where Spark must launch all tasks together.
In case of a task failure, instead of only restarting the failed task, Spark will abort the
entire stage and relaunch all tasks for this stage.
The barrier execution mode feature is experimental and it only handles limited scenarios.
Please read the linked SPIP and design docs to understand the limitations and future plans.
.. versionadded:: 2.4.0
Returns
-------
:class:`RDDBarrier`
instance that provides actions within a barrier stage.
See Also
--------
pyspark.BarrierTaskContext
Notes
-----
For additional information see
- `SPIP: Barrier Execution Mode <http://jira.apache.org/jira/browse/SPARK-24374>`_
- `Design Doc <https://jira.apache.org/jira/browse/SPARK-24582>`_
This API is experimental
"""
return RDDBarrier(self)
def _is_barrier(self) -> bool:
"""
Whether this RDD is in a barrier stage.
"""
return self._jrdd.rdd().isBarrier()
def withResources(self: "RDD[T]", profile: ResourceProfile) -> "RDD[T]":
"""
Specify a :class:`pyspark.resource.ResourceProfile` to use when calculating this RDD.
This is only supported on certain cluster managers and currently requires dynamic
allocation to be enabled. It will result in new executors with the resources specified
being acquired to calculate the RDD.
.. versionadded:: 3.1.0
Notes
-----
This API is experimental
"""
self.has_resource_profile = True
if profile._java_resource_profile is not None:
jrp = profile._java_resource_profile
else:
assert self.ctx._jvm is not None
builder = self.ctx._jvm.org.apache.spark.resource.ResourceProfileBuilder()
ereqs = ExecutorResourceRequests(self.ctx._jvm, profile._executor_resource_requests)
treqs = TaskResourceRequests(self.ctx._jvm, profile._task_resource_requests)
builder.require(ereqs._java_executor_resource_requests)
builder.require(treqs._java_task_resource_requests)
jrp = builder.build()
self._jrdd.withResources(jrp)
return self
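    # Illustrative sketch (not part of the original source), assuming the
    # pyspark.resource builder API (Spark 3.1+); as noted above, this requires
    # dynamic allocation on a supported cluster manager.
    #
    #   from pyspark.resource import (ExecutorResourceRequests,
    #                                 TaskResourceRequests, ResourceProfileBuilder)
    #
    #   ereqs = ExecutorResourceRequests().cores(4).memory("6g")
    #   treqs = TaskResourceRequests().cpus(1)
    #   profile = ResourceProfileBuilder().require(ereqs).require(treqs).build
    #   rdd.withResources(profile).collect()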
def getResourceProfile(self) -> Optional[ResourceProfile]:
"""
Get the :class:`pyspark.resource.ResourceProfile` specified with this RDD or None
if it wasn't specified.
.. versionadded:: 3.1.0
Returns
-------
:py:class:`pyspark.resource.ResourceProfile`
The user specified profile or None if none were specified
Notes
-----
This API is experimental
"""
rp = self._jrdd.getResourceProfile()
if rp is not None:
return ResourceProfile(_java_resource_profile=rp)
else:
return None
@overload
def toDF(
self: "RDD[RowLike]",
schema: Optional[Union[List[str], Tuple[str, ...]]] = None,
sampleRatio: Optional[float] = None,
) -> "DataFrame":
...
@overload
def toDF(
self: "RDD[RowLike]", schema: Optional[Union["StructType", str]] = None
) -> "DataFrame":
...
@overload
def toDF(
self: "RDD[AtomicValue]",
schema: Union["AtomicType", str],
) -> "DataFrame":
...
def toDF(
self: "RDD[Any]", schema: Optional[Any] = None, sampleRatio: Optional[float] = None
) -> "DataFrame":
raise RuntimeError("""RDD.toDF was called before SparkSession was initialized.""")
def _prepare_for_python_RDD(sc: "SparkContext", command: Any) -> Tuple[bytes, Any, Any, Any]:
# the serialized command will be compressed by broadcast
ser = CloudPickleSerializer()
pickled_command = ser.dumps(command)
assert sc._jvm is not None
if len(pickled_command) > sc._jvm.PythonUtils.getBroadcastThreshold(sc._jsc): # Default 1M
        # The broadcast will have the same life cycle as the created PythonRDD
broadcast = sc.broadcast(pickled_command)
pickled_command = ser.dumps(broadcast)
broadcast_vars = [x._jbroadcast for x in sc._pickled_broadcast_vars]
sc._pickled_broadcast_vars.clear()
return pickled_command, broadcast_vars, sc.environment, sc._python_includes
def _wrap_function(
sc: "SparkContext", func: Callable, deserializer: Any, serializer: Any, profiler: Any = None
) -> "JavaObject":
assert deserializer, "deserializer should not be empty"
assert serializer, "serializer should not be empty"
command = (func, profiler, deserializer, serializer)
pickled_command, broadcast_vars, env, includes = _prepare_for_python_RDD(sc, command)
assert sc._jvm is not None
return sc._jvm.PythonFunction(
bytearray(pickled_command),
env,
includes,
sc.pythonExec,
sc.pythonVer,
broadcast_vars,
sc._javaAccumulator,
)
class RDDBarrier(Generic[T]):
"""
Wraps an RDD in a barrier stage, which forces Spark to launch tasks of this stage together.
:class:`RDDBarrier` instances are created by :func:`RDD.barrier`.
.. versionadded:: 2.4.0
Notes
-----
This API is experimental
"""
def __init__(self, rdd: RDD[T]):
self.rdd = rdd
def mapPartitions(
self, f: Callable[[Iterable[T]], Iterable[U]], preservesPartitioning: bool = False
) -> RDD[U]:
"""
Returns a new RDD by applying a function to each partition of the wrapped RDD,
where tasks are launched together in a barrier stage.
The interface is the same as :func:`RDD.mapPartitions`.
Please see the API doc there.
.. versionadded:: 2.4.0
Notes
-----
This API is experimental
"""
def func(s: int, iterator: Iterable[T]) -> Iterable[U]:
return f(iterator)
return PipelinedRDD(self.rdd, func, preservesPartitioning, isFromBarrier=True)
def mapPartitionsWithIndex(
self,
f: Callable[[int, Iterable[T]], Iterable[U]],
preservesPartitioning: bool = False,
) -> RDD[U]:
"""
Returns a new RDD by applying a function to each partition of the wrapped RDD, while
tracking the index of the original partition. And all tasks are launched together
in a barrier stage.
The interface is the same as :func:`RDD.mapPartitionsWithIndex`.
Please see the API doc there.
.. versionadded:: 3.0.0
Notes
-----
This API is experimental
"""
return PipelinedRDD(self.rdd, f, preservesPartitioning, isFromBarrier=True)
class PipelinedRDD(RDD[U], Generic[T, U]):
"""
Examples
--------
Pipelined maps:
>>> rdd = sc.parallelize([1, 2, 3, 4])
>>> rdd.map(lambda x: 2 * x).cache().map(lambda x: 2 * x).collect()
[4, 8, 12, 16]
>>> rdd.map(lambda x: 2 * x).map(lambda x: 2 * x).collect()
[4, 8, 12, 16]
Pipelined reduces:
>>> from operator import add
>>> rdd.map(lambda x: 2 * x).reduce(add)
20
>>> rdd.flatMap(lambda x: [x, x]).reduce(add)
20
"""
def __init__(
self,
prev: RDD[T],
func: Callable[[int, Iterable[T]], Iterable[U]],
preservesPartitioning: bool = False,
isFromBarrier: bool = False,
):
if not isinstance(prev, PipelinedRDD) or not prev._is_pipelinable():
# This transformation is the first in its stage:
self.func = func
self.preservesPartitioning = preservesPartitioning
self._prev_jrdd = prev._jrdd
self._prev_jrdd_deserializer = prev._jrdd_deserializer
else:
prev_func: Callable[[int, Iterable[V]], Iterable[T]] = prev.func
def pipeline_func(split: int, iterator: Iterable[V]) -> Iterable[U]:
return func(split, prev_func(split, iterator))
self.func = pipeline_func
self.preservesPartitioning = prev.preservesPartitioning and preservesPartitioning
self._prev_jrdd = prev._prev_jrdd # maintain the pipeline
self._prev_jrdd_deserializer = prev._prev_jrdd_deserializer
self.is_cached = False
self.has_resource_profile = False
self.is_checkpointed = False
self.ctx = prev.ctx
self.prev = prev
self._jrdd_val: Optional["JavaObject"] = None
self._id = None
self._jrdd_deserializer = self.ctx.serializer
self._bypass_serializer = False
self.partitioner = prev.partitioner if self.preservesPartitioning else None
self.is_barrier = isFromBarrier or prev._is_barrier()
def getNumPartitions(self) -> int:
return self._prev_jrdd.partitions().size()
@property
def _jrdd(self) -> "JavaObject":
if self._jrdd_val:
return self._jrdd_val
if self._bypass_serializer:
self._jrdd_deserializer = NoOpSerializer()
if self.ctx.profiler_collector:
profiler = self.ctx.profiler_collector.new_profiler(self.ctx)
else:
profiler = None
wrapped_func = _wrap_function(
self.ctx, self.func, self._prev_jrdd_deserializer, self._jrdd_deserializer, profiler
)
assert self.ctx._jvm is not None
python_rdd = self.ctx._jvm.PythonRDD(
self._prev_jrdd.rdd(), wrapped_func, self.preservesPartitioning, self.is_barrier
)
self._jrdd_val = python_rdd.asJavaRDD()
if profiler:
assert self._jrdd_val is not None
self._id = self._jrdd_val.id()
self.ctx.profiler_collector.add_profiler(self._id, profiler)
return self._jrdd_val
def id(self) -> int:
if self._id is None:
self._id = self._jrdd.id()
return self._id
def _is_pipelinable(self) -> bool:
return not (self.is_cached or self.is_checkpointed or self.has_resource_profile)
def _is_barrier(self) -> bool:
return self.is_barrier
def _test() -> None:
import doctest
from pyspark.context import SparkContext
globs = globals().copy()
# The small batch size here ensures that we see multiple batches,
# even in these small test examples:
globs["sc"] = SparkContext("local[4]", "PythonTest")
(failure_count, test_count) = doctest.testmod(globs=globs, optionflags=doctest.ELLIPSIS)
globs["sc"].stop()
if failure_count:
sys.exit(-1)
if __name__ == "__main__":
_test()
| vinodkc/spark | python/pyspark/rdd.py | Python | apache-2.0 | 126,212 |
# Copyright 2010 OpenStack Foundation
# Copyright 2012 University Of Minho
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from jacket.compute import exception
from jacket.compute import test
from jacket.tests.compute.unit.virt.libvirt import fakelibvirt
from jacket.compute import utils
from jacket.compute.virt.libvirt import host
from jacket.compute.virt.libvirt.volume import volume
SECRET_UUID = '2a0a0d6c-babf-454d-b93e-9ac9957b95e0'
class FakeSecret(object):
def __init__(self):
self.uuid = SECRET_UUID
def getUUIDString(self):
return self.uuid
def UUIDString(self):
return self.uuid
def setValue(self, value):
self.value = value
return 0
def getValue(self, value):
return self.value
def undefine(self):
self.value = None
return 0
class LibvirtVolumeBaseTestCase(test.NoDBTestCase):
"""Contains common setup and helper methods for libvirt volume tests."""
def setUp(self):
super(LibvirtVolumeBaseTestCase, self).setUp()
self.executes = []
def fake_execute(*cmd, **kwargs):
self.executes.append(cmd)
return None, None
self.stubs.Set(utils, 'execute', fake_execute)
self.useFixture(fakelibvirt.FakeLibvirtFixture())
class FakeLibvirtDriver(object):
def __init__(self):
self._host = host.Host("qemu:///system")
def _get_all_block_devices(self):
return []
self.fake_conn = FakeLibvirtDriver()
self.connr = {
'ip': '127.0.0.1',
'initiator': 'fake_initiator',
'host': 'fake_host'
}
self.disk_info = {
"bus": "virtio",
"dev": "vde",
"type": "disk",
}
self.name = 'volume-00000001'
self.location = '10.0.2.15:3260'
self.iqn = 'iqn.2010-10.org.openstack:%s' % self.name
self.vol = {'id': 1, 'name': self.name}
self.uuid = '875a8070-d0b9-4949-8b31-104d125c9a64'
self.user = 'foo'
def _assertFileTypeEquals(self, tree, file_path):
self.assertEqual('file', tree.get('type'))
self.assertEqual(file_path, tree.find('./source').get('file'))
class LibvirtISCSIVolumeBaseTestCase(LibvirtVolumeBaseTestCase):
"""Contains common setup and helper methods for iSCSI volume tests."""
def iscsi_connection(self, volume, location, iqn, auth=False,
transport=None):
dev_name = 'ip-%s-iscsi-%s-lun-1' % (location, iqn)
if transport is not None:
dev_name = 'pci-0000:00:00.0-' + dev_name
dev_path = '/dev/disk/by-path/%s' % (dev_name)
ret = {
'driver_volume_type': 'iscsi',
'data': {
'volume_id': volume['id'],
'target_portal': location,
'target_iqn': iqn,
'target_lun': 1,
'device_path': dev_path,
'qos_specs': {
'total_bytes_sec': '102400',
'read_iops_sec': '200',
}
}
}
if auth:
ret['data']['auth_method'] = 'CHAP'
ret['data']['auth_username'] = 'foo'
ret['data']['auth_password'] = 'bar'
return ret
class LibvirtVolumeTestCase(LibvirtISCSIVolumeBaseTestCase):
def _assertDiskInfoEquals(self, tree, disk_info):
self.assertEqual(disk_info['type'], tree.get('device'))
self.assertEqual(disk_info['bus'], tree.find('./target').get('bus'))
self.assertEqual(disk_info['dev'], tree.find('./target').get('dev'))
def _test_libvirt_volume_driver_disk_info(self):
libvirt_driver = volume.LibvirtVolumeDriver(self.fake_conn)
connection_info = {
'driver_volume_type': 'fake',
'data': {
'device_path': '/foo',
},
'serial': 'fake_serial',
}
conf = libvirt_driver.get_config(connection_info, self.disk_info)
tree = conf.format_dom()
self._assertDiskInfoEquals(tree, self.disk_info)
def test_libvirt_volume_disk_info_type(self):
self.disk_info['type'] = 'cdrom'
self._test_libvirt_volume_driver_disk_info()
def test_libvirt_volume_disk_info_dev(self):
self.disk_info['dev'] = 'hdc'
self._test_libvirt_volume_driver_disk_info()
def test_libvirt_volume_disk_info_bus(self):
self.disk_info['bus'] = 'scsi'
self._test_libvirt_volume_driver_disk_info()
def test_libvirt_volume_driver_serial(self):
libvirt_driver = volume.LibvirtVolumeDriver(self.fake_conn)
connection_info = {
'driver_volume_type': 'fake',
'data': {
'device_path': '/foo',
},
'serial': 'fake_serial',
}
conf = libvirt_driver.get_config(connection_info, self.disk_info)
tree = conf.format_dom()
self.assertEqual('block', tree.get('type'))
self.assertEqual('fake_serial', tree.find('./serial').text)
self.assertIsNone(tree.find('./blockio'))
self.assertIsNone(tree.find("driver[@discard]"))
def test_libvirt_volume_driver_blockio(self):
libvirt_driver = volume.LibvirtVolumeDriver(self.fake_conn)
connection_info = {
'driver_volume_type': 'fake',
'data': {
'device_path': '/foo',
'logical_block_size': '4096',
'physical_block_size': '4096',
},
'serial': 'fake_serial',
}
disk_info = {
"bus": "virtio",
"dev": "vde",
"type": "disk",
}
conf = libvirt_driver.get_config(connection_info, disk_info)
tree = conf.format_dom()
blockio = tree.find('./blockio')
self.assertEqual('4096', blockio.get('logical_block_size'))
self.assertEqual('4096', blockio.get('physical_block_size'))
def test_libvirt_volume_driver_iotune(self):
libvirt_driver = volume.LibvirtVolumeDriver(self.fake_conn)
connection_info = {
'driver_volume_type': 'fake',
'data': {
"device_path": "/foo",
'qos_specs': 'bar',
},
}
disk_info = {
"bus": "virtio",
"dev": "vde",
"type": "disk",
}
conf = libvirt_driver.get_config(connection_info, disk_info)
tree = conf.format_dom()
iotune = tree.find('./iotune')
# ensure invalid qos_specs is ignored
self.assertIsNone(iotune)
specs = {
'total_bytes_sec': '102400',
'read_bytes_sec': '51200',
'write_bytes_sec': '0',
'total_iops_sec': '0',
'read_iops_sec': '200',
'write_iops_sec': '200',
}
del connection_info['data']['qos_specs']
connection_info['data'].update(dict(qos_specs=specs))
conf = libvirt_driver.get_config(connection_info, disk_info)
tree = conf.format_dom()
self.assertEqual('102400', tree.find('./iotune/total_bytes_sec').text)
self.assertEqual('51200', tree.find('./iotune/read_bytes_sec').text)
self.assertEqual('0', tree.find('./iotune/write_bytes_sec').text)
self.assertEqual('0', tree.find('./iotune/total_iops_sec').text)
self.assertEqual('200', tree.find('./iotune/read_iops_sec').text)
self.assertEqual('200', tree.find('./iotune/write_iops_sec').text)
def test_libvirt_volume_driver_readonly(self):
libvirt_driver = volume.LibvirtVolumeDriver(self.fake_conn)
connection_info = {
'driver_volume_type': 'fake',
'data': {
"device_path": "/foo",
'access_mode': 'bar',
},
}
disk_info = {
"bus": "virtio",
"dev": "vde",
"type": "disk",
}
self.assertRaises(exception.InvalidVolumeAccessMode,
libvirt_driver.get_config,
connection_info, self.disk_info)
connection_info['data']['access_mode'] = 'rw'
conf = libvirt_driver.get_config(connection_info, disk_info)
tree = conf.format_dom()
readonly = tree.find('./readonly')
self.assertIsNone(readonly)
connection_info['data']['access_mode'] = 'ro'
conf = libvirt_driver.get_config(connection_info, disk_info)
tree = conf.format_dom()
readonly = tree.find('./readonly')
self.assertIsNotNone(readonly)
@mock.patch('compute.virt.libvirt.host.Host.has_min_version')
def test_libvirt_volume_driver_discard_true(self, mock_has_min_version):
# Check the discard attrib is present in driver section
mock_has_min_version.return_value = True
libvirt_driver = volume.LibvirtVolumeDriver(self.fake_conn)
connection_info = {
'driver_volume_type': 'fake',
'data': {
'device_path': '/foo',
'discard': True,
},
'serial': 'fake_serial',
}
conf = libvirt_driver.get_config(connection_info, self.disk_info)
tree = conf.format_dom()
driver_node = tree.find("driver[@discard]")
self.assertIsNotNone(driver_node)
self.assertEqual('unmap', driver_node.attrib['discard'])
def test_libvirt_volume_driver_discard_false(self):
# Check the discard attrib is not present in driver section
libvirt_driver = volume.LibvirtVolumeDriver(self.fake_conn)
connection_info = {
'driver_volume_type': 'fake',
'data': {
'device_path': '/foo',
'discard': False,
},
'serial': 'fake_serial',
}
conf = libvirt_driver.get_config(connection_info, self.disk_info)
tree = conf.format_dom()
self.assertIsNone(tree.find("driver[@discard]"))
@mock.patch('compute.virt.libvirt.host.Host.has_min_version')
def test_libvirt_volume_driver_discard_true_bad_version(
self, mock_has_min_version):
# Check the discard attrib is not present in driver section
mock_has_min_version.return_value = False
libvirt_driver = volume.LibvirtVolumeDriver(self.fake_conn)
connection_info = {
'driver_volume_type': 'fake',
'data': {
'device_path': '/foo',
'discard': True,
},
'serial': 'fake_serial',
}
conf = libvirt_driver.get_config(connection_info, self.disk_info)
tree = conf.format_dom()
self.assertIsNone(tree.find("driver[@discard]"))
| HybridF5/jacket | jacket/tests/compute/unit/virt/libvirt/volume/test_volume.py | Python | apache-2.0 | 11,446 |
# Copyright 2017 The Forseti Security Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Forwarding Rules Rule Scanner Test"""
from builtins import object
import unittest
import unittest.mock as mock
from tests.unittest_utils import ForsetiTestCase
from google.cloud.forseti.scanner.scanners import forwarding_rule_scanner
from tests.unittest_utils import get_datafile_path
from google.cloud.forseti.common.gcp_type import forwarding_rule as fr
class ForwardingRule(object):
"""Represents ForwardRule resource."""
class ForwardingRuleScannerTest(ForsetiTestCase):
def test_forwarding_rules_scanner_all_match(self):
rules_local_path = get_datafile_path(__file__,
'forward_rule_test_1.yaml')
scanner = forwarding_rule_scanner.ForwardingRuleScanner(
{}, {}, mock.MagicMock(), '', '', rules_local_path)
project_id = "abc-123"
gcp_forwarding_rules_resource_data = [
{
"id": "46",
"creationTimestamp": "2017-06-01 04:19:37",
"name": "abc-123",
"description": "",
"region": "https://www.googleapis.com/compute/v1/projects/abc-123/regions/asia-east1",
"IPAddress": "198.51.100.99",
"IPProtocol": "UDP",
"portRange": "4500-4500",
"ports": [],
"target": "https://www.googleapis.com/compute/v1/projects/abc-123/regions/asia-east1/abc-123/abc-123",
"loadBalancingScheme": "EXTERNAL",
},
{
"id": "23",
"creationTimestamp": "2017-06-01 04:19:37",
"name": "abc-123",
"description": "",
"region": "https://www.googleapis.com/compute/v1/projects/abc-123/regions/asia-east1",
"IPAddress": "198.51.100.23",
"IPProtocol": "TCP",
"ports": [8080],
"target": "https://www.googleapis.com/compute/v1/projects/abc-123/regions/asia-east1/abc-123/abc-123",
"loadBalancingScheme": "INTERNAL",
},
{
"id": "46",
"creationTimestamp": "2017-06-01 04:19:37",
"name": "abc-123",
"description": "",
"region": "https://www.googleapis.com/compute/v1/projects/abc-123/regions/asia-east1",
"IPAddress": "198.51.100.46",
"IPProtocol": "ESP",
"ports": [],
"target": "https://www.googleapis.com/compute/v1/projects/abc-123/regions/asia-east1/abc-123/abc-123",
"loadBalancingScheme": "EXTERNAL",
},
{
"id": "46",
"creationTimestamp": "2017-06-01 04:19:37",
"name": "abc-123",
"description": "",
"region": "https://www.googleapis.com/compute/v1/projects/abc-123/regions/asia-east1",
"IPAddress": "198.51.100.35",
"IPProtocol": "TCP",
"portRange": "4500-4500",
"target": "https://www.googleapis.com/compute/v1/projects/abc-123/regions/asia-east1/abc-123/abc-123",
"loadBalancingScheme": "EXTERNAL",
}
]
gcp_forwarding_rules_resource_objs = []
for gcp_forwarding_rule_resource_data in gcp_forwarding_rules_resource_data:
gcp_forwarding_rules_resource_objs.append(
fr.ForwardingRule.from_dict(
project_id, '', gcp_forwarding_rule_resource_data))
violations = scanner._find_violations(gcp_forwarding_rules_resource_objs)
self.assertEqual(0, len(violations))
def test_forwarding_rules_scanner_no_match(self):
rules_local_path = get_datafile_path(__file__,
'forward_rule_test_1.yaml')
scanner = forwarding_rule_scanner.ForwardingRuleScanner(
{}, {}, mock.MagicMock(), '', '', rules_local_path)
project_id = "abc-123"
gcp_forwarding_rules_resource_data = [
{
"id": "46",
"creationTimestamp": "2017-06-01 04:19:37",
"name": "abc-123",
"description": "",
"region": "https://www.googleapis.com/compute/v1/projects/abc-123/regions/asia-east1",
"IPAddress": "198.51.100.99",
"IPProtocol": "TCP",
"portRange": "4500-4500",
"ports": [],
"target": "https://www.googleapis.com/compute/v1/projects/abc-123/regions/asia-east1/abc-123/abc-123",
"loadBalancingScheme": "EXTERNAL",
},
{
"id": "23",
"creationTimestamp": "2017-06-01 04:19:37",
"name": "abc-123",
"description": "",
"region": "https://www.googleapis.com/compute/v1/projects/abc-123/regions/asia-east1",
"IPAddress": "198.51.100.23",
"IPProtocol": "TCP",
"ports": [8081],
"target": "https://www.googleapis.com/compute/v1/projects/abc-123/regions/asia-east1/abc-123/abc-123",
"loadBalancingScheme": "INTERNAL",
},
{
"id": "46",
"creationTimestamp": "2017-06-01 04:19:37",
"name": "abc-123",
"description": "",
"region": "https://www.googleapis.com/compute/v1/projects/abc-123/regions/asia-east1",
"IPAddress": "198.51.101.46",
"IPProtocol": "ESP",
"ports": [],
"target": "https://www.googleapis.com/compute/v1/projects/abc-123/regions/asia-east1/abc-123/abc-123",
"loadBalancingScheme": "EXTERNAL",
},
{
"id": "46",
"creationTimestamp": "2017-06-01 04:19:37",
"name": "abc-123",
"description": "",
"region": "https://www.googleapis.com/compute/v1/projects/abc-123/regions/asia-east1",
"IPAddress": "198.51.100.35",
"IPProtocol": "TCP",
"portRange": "4400-4500",
"target": "https://www.googleapis.com/compute/v1/projects/abc-123/regions/asia-east1/abc-123/abc-123",
"loadBalancingScheme": "EXTERNAL",
}
]
gcp_forwarding_rules_resource_objs = []
for gcp_forwarding_rule_resource_data in gcp_forwarding_rules_resource_data:
gcp_forwarding_rules_resource_objs.append(
fr.ForwardingRule.from_dict(
project_id, '', gcp_forwarding_rule_resource_data)
)
violations = scanner._find_violations(gcp_forwarding_rules_resource_objs)
self.assertEqual(4, len(violations))
if __name__ == '__main__':
unittest.main()
| forseti-security/forseti-security | tests/scanner/scanners/forwarding_rule_rules_scanner_test.py | Python | apache-2.0 | 7,464 |
# coding=utf-8
# Copyright 2020 Microsoft and the HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Tokenization class for model DeBERTa."""
from typing import List, Optional
from ...tokenization_utils import AddedToken
from ...utils import logging
from ..gpt2.tokenization_gpt2 import GPT2Tokenizer
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt"}
PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"microsoft/deberta-base": "https://huggingface.co/microsoft/deberta-base/resolve/main/vocab.json",
"microsoft/deberta-large": "https://huggingface.co/microsoft/deberta-large/resolve/main/vocab.json",
"microsoft/deberta-xlarge": "https://huggingface.co/microsoft/deberta-xlarge/resolve/main/vocab.json",
"microsoft/deberta-base-mnli": "https://huggingface.co/microsoft/deberta-base-mnli/resolve/main/vocab.json",
"microsoft/deberta-large-mnli": "https://huggingface.co/microsoft/deberta-large-mnli/resolve/main/vocab.json",
"microsoft/deberta-xlarge-mnli": "https://huggingface.co/microsoft/deberta-xlarge-mnli/resolve/main/vocab.json",
},
"merges_file": {
"microsoft/deberta-base": "https://huggingface.co/microsoft/deberta-base/resolve/main/merges.txt",
"microsoft/deberta-large": "https://huggingface.co/microsoft/deberta-large/resolve/main/merges.txt",
"microsoft/deberta-xlarge": "https://huggingface.co/microsoft/deberta-xlarge/resolve/main/merges.txt",
"microsoft/deberta-base-mnli": "https://huggingface.co/microsoft/deberta-base-mnli/resolve/main/merges.txt",
"microsoft/deberta-large-mnli": "https://huggingface.co/microsoft/deberta-large-mnli/resolve/main/merges.txt",
"microsoft/deberta-xlarge-mnli": "https://huggingface.co/microsoft/deberta-xlarge-mnli/resolve/main/merges.txt",
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"microsoft/deberta-base": 512,
"microsoft/deberta-large": 512,
"microsoft/deberta-xlarge": 512,
"microsoft/deberta-base-mnli": 512,
"microsoft/deberta-large-mnli": 512,
"microsoft/deberta-xlarge-mnli": 512,
}
PRETRAINED_INIT_CONFIGURATION = {
"microsoft/deberta-base": {"do_lower_case": False},
"microsoft/deberta-large": {"do_lower_case": False},
}
class DebertaTokenizer(GPT2Tokenizer):
r"""
Constructs a DeBERTa tokenizer, which runs end-to-end tokenization: punctuation splitting + wordpiece
Args:
vocab_file (`str`):
File containing the vocabulary.
do_lower_case (`bool`, *optional*, defaults to `True`):
Whether or not to lowercase the input when tokenizing.
unk_token (`str`, *optional*, defaults to `"[UNK]"`):
The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this
token instead.
sep_token (`str`, *optional*, defaults to `"[SEP]"`):
The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences for
sequence classification or for a text and a question for question answering. It is also used as the last
token of a sequence built with special tokens.
pad_token (`str`, *optional*, defaults to `"[PAD]"`):
The token used for padding, for example when batching sequences of different lengths.
cls_token (`str`, *optional*, defaults to `"[CLS]"`):
The classifier token which is used when doing sequence classification (classification of the whole sequence
instead of per-token classification). It is the first token of the sequence when built with special tokens.
mask_token (`str`, *optional*, defaults to `"[MASK]"`):
The token used for masking values. This is the token used when training this model with masked language
modeling. This is the token which the model will try to predict.
"""
vocab_files_names = VOCAB_FILES_NAMES
pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
model_input_names = ["input_ids", "attention_mask", "token_type_ids"]
def __init__(
self,
vocab_file,
merges_file,
errors="replace",
bos_token="[CLS]",
eos_token="[SEP]",
sep_token="[SEP]",
cls_token="[CLS]",
unk_token="[UNK]",
pad_token="[PAD]",
mask_token="[MASK]",
add_prefix_space=False,
**kwargs
):
bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token
eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token
cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token
unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token
pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token
        # Mask token behaves like a normal word, i.e. it includes the space before it
mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
super().__init__(
vocab_file=vocab_file,
merges_file=merges_file,
errors=errors,
bos_token=bos_token,
eos_token=eos_token,
unk_token=unk_token,
sep_token=sep_token,
cls_token=cls_token,
pad_token=pad_token,
mask_token=mask_token,
add_prefix_space=add_prefix_space,
**kwargs,
)
def build_inputs_with_special_tokens(
self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
) -> List[int]:
"""
        Build model inputs from a sequence or a pair of sequences for sequence classification tasks by concatenating and
adding special tokens. A DeBERTa sequence has the following format:
- single sequence: [CLS] X [SEP]
- pair of sequences: [CLS] A [SEP] B [SEP]
Args:
token_ids_0 (`List[int]`):
List of IDs to which the special tokens will be added.
token_ids_1 (`List[int]`, *optional*):
Optional second list of IDs for sequence pairs.
Returns:
`List[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens.
"""
if token_ids_1 is None:
return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
cls = [self.cls_token_id]
sep = [self.sep_token_id]
return cls + token_ids_0 + sep + token_ids_1 + sep
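    # Illustrative sketch (not part of the original source); the token IDs are
    # hypothetical, only the placement of the CLS/SEP tokens is meaningful:
    #
    #   tokenizer.build_inputs_with_special_tokens([10, 11], [20, 21])
    #   # -> [cls_id, 10, 11, sep_id, 20, 21, sep_id]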
def get_special_tokens_mask(
self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
) -> List[int]:
"""
Retrieves sequence ids from a token list that has no special tokens added. This method is called when adding
special tokens using the tokenizer `prepare_for_model` or `encode_plus` methods.
Args:
token_ids_0 (`List[int]`):
List of IDs.
token_ids_1 (`List[int]`, *optional*):
Optional second list of IDs for sequence pairs.
already_has_special_tokens (`bool`, *optional*, defaults to `False`):
Whether or not the token list is already formatted with special tokens for the model.
Returns:
`List[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token.
"""
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
)
if token_ids_1 is None:
return [1] + ([0] * len(token_ids_0)) + [1]
return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]
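    # Illustrative sketch (not part of the original source): for two two-token
    # sequences without pre-existing special tokens,
    #
    #   tokenizer.get_special_tokens_mask([10, 11], [20, 21])
    #   # -> [1, 0, 0, 1, 0, 0, 1]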
def create_token_type_ids_from_sequences(
self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
) -> List[int]:
"""
Create a mask from the two sequences passed to be used in a sequence-pair classification task. A DeBERTa
sequence pair mask has the following format:
```
0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1
        | first sequence    | second sequence |
```
If `token_ids_1` is `None`, this method only returns the first portion of the mask (0s).
Args:
token_ids_0 (`List[int]`):
List of IDs.
token_ids_1 (`List[int]`, *optional*):
Optional second list of IDs for sequence pairs.
Returns:
`List[int]`: List of [token type IDs](../glossary#token-type-ids) according to the given sequence(s).
"""
sep = [self.sep_token_id]
cls = [self.cls_token_id]
if token_ids_1 is None:
return len(cls + token_ids_0 + sep) * [0]
return len(cls + token_ids_0 + sep + token_ids_1 + sep) * [0]
def prepare_for_tokenization(self, text, is_split_into_words=False, **kwargs):
add_prefix_space = kwargs.pop("add_prefix_space", self.add_prefix_space)
if (is_split_into_words or add_prefix_space) and (len(text) > 0 and not text[0].isspace()):
text = " " + text
return (text, kwargs)
| huggingface/transformers | src/transformers/models/deberta/tokenization_deberta.py | Python | apache-2.0 | 10,189 |
"""Support for LaMetric notifications."""
import logging
from requests.exceptions import ConnectionError as RequestsConnectionError
import voluptuous as vol
from homeassistant.components.notify import (
ATTR_DATA, ATTR_TARGET, PLATFORM_SCHEMA, BaseNotificationService)
from homeassistant.const import CONF_ICON
import homeassistant.helpers.config_validation as cv
from . import DOMAIN as LAMETRIC_DOMAIN
REQUIREMENTS = ['lmnotify==0.0.4']
_LOGGER = logging.getLogger(__name__)
AVAILABLE_PRIORITIES = ['info', 'warning', 'critical']
CONF_CYCLES = 'cycles'
CONF_LIFETIME = 'lifetime'
CONF_PRIORITY = 'priority'
DEPENDENCIES = ['lametric']
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
vol.Optional(CONF_ICON, default='a7956'): cv.string,
vol.Optional(CONF_LIFETIME, default=10): cv.positive_int,
vol.Optional(CONF_CYCLES, default=1): cv.positive_int,
vol.Optional(CONF_PRIORITY, default='warning'):
vol.In(AVAILABLE_PRIORITIES),
})
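# Illustrative sketch (not part of the original source): a configuration.yaml
# entry matching the schema above might look roughly like
#
#   notify:
#     - platform: lametric
#       icon: "a7956"
#       lifetime: 10
#       cycles: 1
#       priority: warning
#
# The icon identifier is a placeholder; only the keys validated above are assumed.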
def get_service(hass, config, discovery_info=None):
"""Get the LaMetric notification service."""
hlmn = hass.data.get(LAMETRIC_DOMAIN)
return LaMetricNotificationService(
hlmn, config[CONF_ICON], config[CONF_LIFETIME] * 1000,
config[CONF_CYCLES], config[CONF_PRIORITY])
class LaMetricNotificationService(BaseNotificationService):
"""Implement the notification service for LaMetric."""
def __init__(self, hasslametricmanager, icon, lifetime, cycles, priority):
"""Initialize the service."""
self.hasslametricmanager = hasslametricmanager
self._icon = icon
self._lifetime = lifetime
self._cycles = cycles
self._priority = priority
self._devices = []
def send_message(self, message="", **kwargs):
"""Send a message to some LaMetric device."""
from lmnotify import SimpleFrame, Sound, Model
from oauthlib.oauth2 import TokenExpiredError
targets = kwargs.get(ATTR_TARGET)
data = kwargs.get(ATTR_DATA)
_LOGGER.debug("Targets/Data: %s/%s", targets, data)
icon = self._icon
cycles = self._cycles
sound = None
priority = self._priority
# Additional data?
if data is not None:
if "icon" in data:
icon = data["icon"]
if "sound" in data:
try:
sound = Sound(category="notifications",
sound_id=data["sound"])
_LOGGER.debug("Adding notification sound %s",
data["sound"])
except AssertionError:
_LOGGER.error("Sound ID %s unknown, ignoring",
data["sound"])
if "cycles" in data:
cycles = int(data['cycles'])
if "priority" in data:
if data['priority'] in AVAILABLE_PRIORITIES:
priority = data['priority']
else:
_LOGGER.warning("Priority %s invalid, using default %s",
data['priority'], priority)
text_frame = SimpleFrame(icon, message)
_LOGGER.debug("Icon/Message/Cycles/Lifetime: %s, %s, %d, %d",
icon, message, self._cycles, self._lifetime)
frames = [text_frame]
model = Model(frames=frames, cycles=cycles, sound=sound)
lmn = self.hasslametricmanager.manager
try:
self._devices = lmn.get_devices()
except TokenExpiredError:
_LOGGER.debug("Token expired, fetching new token")
lmn.get_token()
self._devices = lmn.get_devices()
except RequestsConnectionError:
_LOGGER.warning("Problem connecting to LaMetric, "
"using cached devices instead")
for dev in self._devices:
if targets is None or dev["name"] in targets:
try:
lmn.set_device(dev)
lmn.send_notification(model, lifetime=self._lifetime,
priority=priority)
_LOGGER.debug("Sent notification to LaMetric %s",
dev["name"])
except OSError:
_LOGGER.warning("Cannot connect to LaMetric %s",
dev["name"])
| jamespcole/home-assistant | homeassistant/components/lametric/notify.py | Python | apache-2.0 | 4,402 |
# Copyright The Cloud Custodian Authors.
# SPDX-License-Identifier: Apache-2.0
import json
import os
import sys
from argparse import ArgumentTypeError
from datetime import datetime, timedelta
from c7n import cli, version, commands
from c7n.resolver import ValuesFrom
from c7n.resources import aws
from c7n.schema import ElementSchema, generate
from c7n.utils import yaml_dump, yaml_load
from .common import BaseTest, TextTestIO
class CliTest(BaseTest):
""" A subclass of BaseTest with some handy functions for CLI related tests. """
def patch_account_id(self):
def test_account_id(options):
options.account_id = self.account_id
self.patch(aws, "_default_account_id", test_account_id)
def get_output(self, argv):
""" Run cli.main with the supplied argv and return the output. """
out, err = self.run_and_expect_success(argv)
return out
def capture_output(self):
out = TextTestIO()
err = TextTestIO()
self.patch(sys, "stdout", out)
self.patch(sys, "stderr", err)
return out, err
def run_and_expect_success(self, argv):
""" Run cli.main() with supplied argv and expect normal execution. """
self.patch_account_id()
self.patch(sys, "argv", argv)
out, err = self.capture_output()
try:
cli.main()
except SystemExit as e:
self.fail(
"Expected sys.exit would not be called. Exit code was ({})".format(
e.code
)
)
return out.getvalue(), err.getvalue()
def run_and_expect_failure(self, argv, exit_code):
""" Run cli.main() with supplied argv and expect exit_code. """
self.patch_account_id()
self.patch(sys, "argv", argv)
out, err = self.capture_output()
# clear_resources()
with self.assertRaises(SystemExit) as cm:
cli.main()
self.assertEqual(cm.exception.code, exit_code)
return out.getvalue(), err.getvalue()
def run_and_expect_exception(self, argv, exception):
""" Run cli.main() with supplied argv and expect supplied exception. """
self.patch_account_id()
self.patch(sys, "argv", argv)
# clear_resources()
try:
cli.main()
except exception:
return
self.fail("Error: did not raise {}.".format(exception))
class UtilsTest(BaseTest):
def test_key_val_pair(self):
self.assertRaises(ArgumentTypeError, cli._key_val_pair, "invalid option")
param = "day=today"
self.assertIs(cli._key_val_pair(param), param)
class VersionTest(CliTest):
def test_version(self):
output = self.get_output(["custodian", "version"])
self.assertEqual(output.strip(), version.version)
def test_debug_version(self):
output = self.get_output(["custodian", "version", "--debug"])
self.assertIn(version.version, output)
self.assertIn('botocore==', output)
self.assertIn('python-dateutil==', output)
class ValidateTest(CliTest):
def test_invalidate_structure_exit(self):
invalid_policies = {"policies": [{"name": "foo"}]}
yaml_file = self.write_policy_file(invalid_policies)
self.run_and_expect_failure(["custodian", "validate", yaml_file], 1)
def test_validate(self):
invalid_policies = {
"policies": [
{
"name": "foo",
"resource": "s3",
"filters": [{"tag:custodian_tagging": "not-null"}],
"actions": [
{"type": "untag", "tags": {"custodian_cleanup": "yes"}}
],
}
]
}
yaml_file = self.write_policy_file(invalid_policies)
json_file = self.write_policy_file(invalid_policies, format="json")
# YAML validation
self.run_and_expect_exception(["custodian", "validate", yaml_file], SystemExit)
# JSON validation
self.run_and_expect_failure(["custodian", "validate", json_file], 1)
# no config files given
self.run_and_expect_failure(["custodian", "validate"], 1)
# nonexistent file given
self.run_and_expect_exception(
["custodian", "validate", "fake.yaml"], ValueError
)
valid_policies = {
"policies": [
{
"name": "foo",
"resource": "s3",
"filters": [{"tag:custodian_tagging": "not-null"}],
"actions": [{"type": "tag", "tags": {"custodian_cleanup": "yes"}}],
}
]
}
yaml_file = self.write_policy_file(valid_policies)
self.run_and_expect_success(["custodian", "validate", yaml_file])
# legacy -c option
self.run_and_expect_success(["custodian", "validate", "-c", yaml_file])
# duplicate policy names
self.run_and_expect_failure(["custodian", "validate", yaml_file, yaml_file], 1)
class SchemaTest(CliTest):
def test_schema_outline(self):
stdout, stderr = self.run_and_expect_success([
"custodian", "schema", "--outline", "--json", "aws"])
data = json.loads(stdout)
self.assertEqual(list(data.keys()), ["aws"])
self.assertTrue(len(data['aws']) > 100)
self.assertEqual(
sorted(data['aws']['aws.ec2'].keys()), ['actions', 'filters'])
self.assertTrue(len(data['aws']['aws.ec2']['actions']) > 10)
def test_schema_alias(self):
stdout, stderr = self.run_and_expect_success([
"custodian", "schema", "aws.network-addr"])
self.assertIn("aws.elastic-ip:", stdout)
def test_schema_alias_unqualified(self):
stdout, stderr = self.run_and_expect_success([
"custodian", "schema", "network-addr"])
self.assertIn("aws.elastic-ip:", stdout)
def test_schema(self):
# no options
stdout, stderr = self.run_and_expect_success(["custodian", "schema"])
data = yaml_load(stdout)
assert data['resources']
# summary option
self.run_and_expect_success(["custodian", "schema", "--summary"])
# json option
self.run_and_expect_success(["custodian", "schema", "--json"])
# with just a cloud
self.run_and_expect_success(["custodian", "schema", "aws"])
# with just a resource
self.run_and_expect_success(["custodian", "schema", "ec2"])
# with just a mode
self.run_and_expect_success(["custodian", "schema", "mode"])
# mode.type
self.run_and_expect_success(["custodian", "schema", "mode.phd"])
# resource.actions
self.run_and_expect_success(["custodian", "schema", "ec2.actions"])
# resource.filters
self.run_and_expect_success(["custodian", "schema", "ec2.filters"])
# specific item
self.run_and_expect_success(["custodian", "schema", "ec2.filters.tag-count"])
def test_invalid_options(self):
# invalid resource
self.run_and_expect_failure(["custodian", "schema", "fakeresource"], 1)
# invalid category
self.run_and_expect_failure(["custodian", "schema", "ec2.arglbargle"], 1)
# invalid item
self.run_and_expect_failure(
["custodian", "schema", "ec2.filters.nonexistent"], 1
)
# invalid number of selectors
self.run_and_expect_failure(["custodian", "schema", "ec2.filters.and.foo"], 1)
def test_schema_output(self):
output = self.get_output(["custodian", "schema"])
self.assertIn("aws.ec2", output)
# self.assertIn("azure.vm", output)
# self.assertIn("gcp.instance", output)
output = self.get_output(["custodian", "schema", "aws"])
self.assertIn("aws.ec2", output)
self.assertNotIn("azure.vm", output)
self.assertNotIn("gcp.instance", output)
output = self.get_output(["custodian", "schema", "aws.ec2"])
self.assertIn("actions:", output)
self.assertIn("filters:", output)
output = self.get_output(["custodian", "schema", "ec2"])
self.assertIn("actions:", output)
self.assertIn("filters:", output)
output = self.get_output(["custodian", "schema", "ec2.filters"])
self.assertNotIn("actions:", output)
self.assertIn("filters:", output)
output = self.get_output(["custodian", "schema", "ec2.filters.image"])
self.assertIn("Help", output)
def test_schema_expand(self):
# refs should only ever exist in a dictionary by itself
test_schema = {
'$ref': '#/definitions/filters_common/value_from'
}
result = ElementSchema.schema(generate()['definitions'], test_schema)
self.assertEqual(result, ValuesFrom.schema)
def test_schema_multi_expand(self):
test_schema = {
'schema1': {
'$ref': '#/definitions/filters_common/value_from'
},
'schema2': {
'$ref': '#/definitions/filters_common/value_from'
}
}
expected = yaml_dump({
'schema1': {
'type': 'object',
'additionalProperties': 'False',
'required': ['url'],
'properties': {
'url': {'type': 'string'},
'format': {'enum': ['csv', 'json', 'txt', 'csv2dict']},
'expr': {'oneOf': [
{'type': 'integer'},
{'type': 'string'}]}
}
},
'schema2': {
'type': 'object',
'additionalProperties': 'False',
'required': ['url'],
'properties': {
'url': {'type': 'string'},
'format': {'enum': ['csv', 'json', 'txt', 'csv2dict']},
'expr': {'oneOf': [
{'type': 'integer'},
{'type': 'string'}]}
}
}
})
result = yaml_dump(ElementSchema.schema(generate()['definitions'], test_schema))
self.assertEqual(result, expected)
def test_schema_expand_not_found(self):
test_schema = {
'$ref': '#/definitions/filters_common/invalid_schema'
}
result = ElementSchema.schema(generate()['definitions'], test_schema)
self.assertEqual(result, None)
class ReportTest(CliTest):
def test_report(self):
policy_name = "ec2-running-instances"
valid_policies = {
"policies": [
{
"name": policy_name,
"resource": "ec2",
"query": [{"instance-state-name": "running"}],
}
]
}
yaml_file = self.write_policy_file(valid_policies)
output = self.get_output(
["custodian", "report", "-s", self.output_dir, yaml_file]
)
self.assertIn("InstanceId", output)
self.assertIn("i-014296505597bf519", output)
# ASCII formatted test
output = self.get_output(
[
"custodian",
"report",
"--format",
"grid",
"-s",
self.output_dir,
yaml_file,
]
)
self.assertIn("InstanceId", output)
self.assertIn("i-014296505597bf519", output)
# json format
output = self.get_output(
["custodian", "report", "--format", "json", "-s", self.output_dir, yaml_file]
)
self.assertTrue("i-014296505597bf519", json.loads(output)[0]['InstanceId'])
# empty file
temp_dir = self.get_temp_dir()
empty_policies = {"policies": []}
yaml_file = self.write_policy_file(empty_policies)
self.run_and_expect_failure(
["custodian", "report", "-s", temp_dir, yaml_file], 1
)
# more than 1 policy
policies = {
"policies": [
{"name": "foo", "resource": "s3"}, {"name": "bar", "resource": "ec2"}
]
}
yaml_file = self.write_policy_file(policies)
self.run_and_expect_failure(
["custodian", "report", "-s", temp_dir, yaml_file], 1
)
def test_warning_on_empty_policy_filter(self):
# This test is to examine the warning output supplied when -p is used and
# the resulting policy set is empty. It is not specific to the `report`
# subcommand - it is also used by `run` and a few other subcommands.
policy_name = "test-policy"
valid_policies = {
"policies": [
{
"name": policy_name,
"resource": "s3",
"filters": [{"tag:custodian_tagging": "not-null"}],
}
]
}
yaml_file = self.write_policy_file(valid_policies)
temp_dir = self.get_temp_dir()
bad_policy_name = policy_name + "-nonexistent"
log_output = self.capture_logging("custodian.commands")
self.run_and_expect_failure(
["custodian", "report", "-s", temp_dir, "-p", bad_policy_name, yaml_file], 1
)
self.assertIn(policy_name, log_output.getvalue())
bad_resource_name = "foo"
self.run_and_expect_failure(
["custodian", "report", "-s", temp_dir, "-t", bad_resource_name, yaml_file],
1,
)
class LogsTest(CliTest):
def test_logs(self):
temp_dir = self.get_temp_dir()
# Test 1 - empty file
empty_policies = {"policies": []}
yaml_file = self.write_policy_file(empty_policies)
self.run_and_expect_failure(["custodian", "logs", "-s", temp_dir, yaml_file], 1)
# Test 2 - more than one policy
policies = {
"policies": [
{"name": "foo", "resource": "s3"}, {"name": "bar", "resource": "ec2"}
]
}
yaml_file = self.write_policy_file(policies)
self.run_and_expect_failure(["custodian", "logs", "-s", temp_dir, yaml_file], 1)
# Test 3 - successful test
p_data = {
"name": "test-policy",
"resource": "rds",
"filters": [
{"key": "GroupName", "type": "security-group", "value": "default"}
],
"actions": [{"days": 10, "type": "retention"}],
}
yaml_file = self.write_policy_file({"policies": [p_data]})
output_dir = os.path.join(os.path.dirname(__file__), "data", "logs")
self.run_and_expect_failure(["custodian", "logs", "-s", output_dir, yaml_file], 1)
class RunTest(CliTest):
def test_ec2(self):
session_factory = self.replay_flight_data(
"test_ec2_state_transition_age_filter"
)
from c7n.policy import PolicyCollection
self.patch(
PolicyCollection,
"session_factory",
staticmethod(lambda x=None: session_factory),
)
temp_dir = self.get_temp_dir()
yaml_file = self.write_policy_file(
{
"policies": [
{
"name": "ec2-state-transition-age",
"resource": "ec2",
"filters": [
{"State.Name": "running"}, {"type": "state-age", "days": 30}
],
}
]
}
)
# TODO - capture logging and ensure the following
# self.assertIn('Running policy ec2-state-transition-age', logs)
# self.assertIn('metric:ResourceCount Count:1 policy:ec2-state-transition-age', logs)
self.run_and_expect_success(
[
"custodian",
"run",
"--cache",
temp_dir + "/cache",
"-s",
temp_dir,
yaml_file,
]
)
def test_error(self):
from c7n.policy import Policy
self.patch(
Policy, "__call__", lambda x: (_ for _ in ()).throw(Exception("foobar"))
)
#
# Make sure that if the policy causes an exception we error out
#
temp_dir = self.get_temp_dir()
yaml_file = self.write_policy_file(
{
"policies": [
{
"name": "error",
"resource": "ec2",
"filters": [
{"State.Name": "running"}, {"type": "state-age", "days": 30}
],
}
]
}
)
self.run_and_expect_failure(
[
"custodian",
"run",
"--cache",
temp_dir + "/cache",
"-s",
temp_dir,
yaml_file,
],
2,
)
#
# Test --debug
#
class CustomError(Exception):
pass
import pdb
self.patch(pdb, "post_mortem", lambda x: (_ for _ in ()).throw(CustomError))
self.run_and_expect_exception(
["custodian", "run", "-s", temp_dir, "--debug", yaml_file], CustomError
)
class MetricsTest(CliTest):
def test_metrics(self):
session_factory = self.replay_flight_data("test_lambda_policy_metrics")
from c7n.policy import PolicyCollection
self.patch(
PolicyCollection,
"session_factory",
staticmethod(lambda x=None: session_factory),
)
yaml_file = self.write_policy_file(
{
"policies": [
{
"name": "ec2-tag-compliance-v6",
"resource": "ec2",
"mode": {"type": "ec2-instance-state", "events": ["running"]},
"filters": [
{"tag:custodian_status": "absent"},
{
"or": [
{"tag:App": "absent"},
{"tag:Env": "absent"},
{"tag:Owner": "absent"},
]
},
],
}
]
}
)
end = datetime.utcnow()
start = end - timedelta(14)
period = 24 * 60 * 60 * 14
self.run_and_expect_failure(
[
"custodian",
"metrics",
"--start",
str(start),
"--end",
str(end),
"--period",
str(period),
yaml_file,
],
1
)
def test_metrics_get_endpoints(self):
#
# Test for defaults when --start is not supplied
#
class FakeOptions:
start = end = None
days = 5
options = FakeOptions()
start, end = commands._metrics_get_endpoints(options)
self.assertEqual((end - start).days, options.days)
#
# Test that --start and --end have to be passed together
#
policy = {
"policies": [
{
"name": "metrics-test",
"resource": "ec2",
"query": [{"instance-state-name": "running"}],
}
]
}
yaml_file = self.write_policy_file(policy)
self.run_and_expect_failure(
["custodian", "metrics", "--start", "1", yaml_file], 1
)
class MiscTest(CliTest):
def test_no_args(self):
stdout, stderr = self.run_and_expect_failure(["custodian"], 2)
self.assertIn("metrics", stderr)
self.assertIn("logs", stderr)
def test_empty_policy_file(self):
# Doesn't do anything, but should exit 0
temp_dir = self.get_temp_dir()
yaml_file = self.write_policy_file({})
self.run_and_expect_failure(
["custodian", "run", "-s", temp_dir, yaml_file], 1)
def test_nonexistent_policy_file(self):
temp_dir = self.get_temp_dir()
yaml_file = self.write_policy_file({})
nonexistent = yaml_file + ".bad"
self.run_and_expect_failure(
["custodian", "run", "-s", temp_dir, yaml_file, nonexistent], 1
)
def test_duplicate_policy(self):
policy = {
"policies": [
{
"name": "metrics-test",
"resource": "ec2",
"query": [{"instance-state-name": "running"}],
}
]
}
temp_dir = self.get_temp_dir()
yaml_file = self.write_policy_file(policy)
self.run_and_expect_failure(
["custodian", "run", "-s", temp_dir, yaml_file, yaml_file], 1
)
def test_failure_with_no_default_region(self):
policy = {"policies": [{"name": "will-never-run", "resource": "ec2"}]}
temp_dir = self.get_temp_dir()
yaml_file = self.write_policy_file(policy)
self.patch(aws, "get_profile_session", lambda x: None)
self.run_and_expect_failure(["custodian", "run", "-s", temp_dir, yaml_file], 1)
| alfredgamulo/cloud-custodian | tests/test_cli.py | Python | apache-2.0 | 21,634 |
#!/usr/bin/env python
#
# Copyright 2016 Hannes Juutilainen
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import subprocess
import json
import hashlib
import time
from autopkglib import Processor, ProcessorError
__all__ = ["VirusTotalAnalyzer"]
# VirusTotal was kind enough to give this processor its own API key so that it can be
# used as-is without further configuration. Please don't abuse this.
DEFAULT_API_KEY = "3858a94a911f47707717f6d090dbb8f86badb750b0f7bfe74a55c0c6143e3de6"
# Default options
DEFAULT_SLEEP = 15
ALWAYS_REPORT_DEFAULT = False
AUTO_SUBMIT_DEFAULT = False
AUTO_SUBMIT_MAX_SIZE_DEFAULT = 419430400 # 400MB
class VirusTotalAnalyzer(Processor):
"""Queries VirusTotal database for information about the given file"""
input_variables = {
"pathname": {
"required": False,
"description": "File path to analyze.",
},
"VIRUSTOTAL_ALWAYS_REPORT": {
"required": False,
"description": "Always request a report instead of only for new downloads",
},
"VIRUSTOTAL_AUTO_SUBMIT": {
"required": False,
"description": "If item is not found in VirusTotal database, automatically submit it for scanning.",
},
"CURL_PATH": {
"required": False,
"default": "/usr/bin/curl",
"description": "Path to curl binary. Defaults to /usr/bin/curl.",
},
}
output_variables = {
"virus_total_analyzer_summary_result": {
"description": "Description of interesting results."
},
}
description = __doc__
def fetch_content(self, url, headers=None, form_parameters=None, data_parameters=None, curl_options=None):
"""Returns content retrieved by curl, given an url and an optional
dictionaries of header-name/value mappings and parameters.
Logic here borrowed from URLTextSearcher processor.
Keyword arguments:
:param url: The URL to fetch
:type url: str None
:param headers: Dictionary of header-names and values
:type headers: dict None
:param form_parameters: Dictionary of items for '--form'
:type form_parameters: dict None
:param data_parameters: Dictionary of items for '--data'
:type data_parameters: dict None
:param curl_options: Array of arguments to pass to curl
:type curl_options: list None
:returns: content as string
"""
try:
cmd = [self.env['CURL_PATH'], '--location']
if curl_options:
cmd.extend(curl_options)
if headers:
for header, value in headers.items():
cmd.extend(['--header', '%s: %s' % (header, value)])
if form_parameters:
for form_parameter, value in form_parameters.items():
cmd.extend(['--form', '%s=%s' % (form_parameter, value)])
if data_parameters:
for data_parameter, value in data_parameters.items():
cmd.extend(['--data', '%s=%s' % (data_parameter, value)])
cmd.append(url)
proc = subprocess.Popen(
cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
(data, stderr) = proc.communicate()
if proc.returncode:
raise ProcessorError(
'Could not retrieve URL %s: %s' % (url, stderr))
except OSError:
raise ProcessorError('Could not retrieve URL: %s' % url)
return data
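    # Illustrative sketch (not part of the original source): for a report lookup
    # the command assembled above is roughly
    #
    #   curl --location --data 'resource=<sha256>' --data 'apikey=<key>' \
    #       https://www.virustotal.com/vtapi/v2/file/report
    #
    # with --form used instead of --data when a file is uploaded.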
def submit_file(self, file_path, api_key):
"""Submit a file to VirusTotal for scanning
:param file_path: Path to a file to upload
:param api_key: API key to use
:returns: JSON response
"""
url = "https://www.virustotal.com/vtapi/v2/file/scan/upload_url"
# Get the upload URL
parameters = {"apikey": api_key}
f = self.fetch_content(url, None, None, parameters, ["-G"])
try:
json_data = json.loads(f)
except (ValueError, KeyError, TypeError) as e:
self.output("Response was: %s" % f)
self.output("JSON format error: %s" % e)
json_data = json.loads(
'{"response_code": 999, "verbose_msg": "Requesting upload URL failed..."}')
return json_data
upload_url = json_data.get('upload_url', None)
if upload_url is None:
return None
# Upload the file
file_path_for_post = "@%s" % file_path
parameters = {"file": file_path_for_post, "apikey": api_key}
f = self.fetch_content(upload_url, None, parameters)
try:
json_data = json.loads(f)
except (ValueError, KeyError, TypeError) as e:
self.output("Response was: %s" % f)
self.output("JSON format error: %s" % e)
json_data = json.loads(
'{"response_code": 999, "verbose_msg": "Request failed, perhaps rate-limited..."}')
# print json.dumps(json_data, sort_keys=True, indent=4)
return json_data
def report_for_hash(self, file_hash, api_key):
"""Request a VirusTotal report for a hash
:param file_hash: md5, sha1 or sha256 hash
:param api_key: API key to use
:returns: JSON response
"""
url = "https://www.virustotal.com/vtapi/v2/file/report"
parameters = {"resource": file_hash, "apikey": api_key}
f = self.fetch_content(url, None, parameters)
try:
json_data = json.loads(f)
except (ValueError, KeyError, TypeError) as e:
self.output("JSON response was: %s" % f)
self.output("JSON format error: %s" % e)
json_data = json.loads(
'{"response_code": 999, "verbose_msg": "Request failed, perhaps rate-limited..."}')
# print json.dumps(json_data, sort_keys=True, indent=4)
return json_data
def calculate_sha256(self, file_path):
"""Calculates a SHA256 checksum
http://stackoverflow.com/a/3431838
:param file_path:
"""
hash_sha256 = hashlib.sha256()
with open(file_path, "rb") as f:
for chunk in iter(lambda: f.read(4096), b""):
hash_sha256.update(chunk)
return hash_sha256.hexdigest()
def main(self):
if self.env.get("VIRUSTOTAL_DISABLED", False):
self.output("Skipped VirusTotal analysis...")
return
input_path = self.env.get("pathname", None)
if not input_path:
self.output("Skipping VirusTotal analysis: no input path defined.")
return
# Get variables and arguments
sleep_seconds = int(self.env.get("VIRUSTOTAL_SLEEP_SECONDS", DEFAULT_SLEEP))
auto_submit = self.env.get("VIRUSTOTAL_AUTO_SUBMIT", AUTO_SUBMIT_DEFAULT)
auto_submit_max_size = int(self.env.get("VIRUSTOTAL_AUTO_SUBMIT_MAX_SIZE", AUTO_SUBMIT_MAX_SIZE_DEFAULT))
api_key = self.env.get("VIRUSTOTAL_API_KEY", DEFAULT_API_KEY)
if not api_key or api_key == "":
raise ProcessorError("No API key available")
force_report = self.env.get("VIRUSTOTAL_ALWAYS_REPORT",
ALWAYS_REPORT_DEFAULT)
if "download_changed" in self.env:
if not self.env["download_changed"] and not force_report:
# URLDownloader did not download new items,
# so skip the analysis
self.output("Skipping VirusTotal analysis: no new download.")
self.env["virustotal_result"] = "SKIPPED"
return
# Calculate the SHA256 hash of the file for submitting
self.output("Calculating checksum for %s" % input_path)
input_path_hash = self.calculate_sha256(input_path)
try:
last_virus_total_request = int(
os.environ.get('AUTOPKG_VIRUSTOTAL_LAST_RUN_TIME', 0))
except ValueError:
last_virus_total_request = 0
if last_virus_total_request and sleep_seconds > 0:
now = int(time.time())
next_time = last_virus_total_request + sleep_seconds
if now < next_time:
sleep_time = next_time - now
self.output(
"Sleeping %s seconds before requesting report..."
% sleep_time)
time.sleep(sleep_time)
# Request details for the calculated hash
self.output("Requesting report...")
json_data = self.report_for_hash(input_path_hash, api_key)
# Parse the report
response_code = json_data.get("response_code", None)
self.output("Response code: %s" % response_code)
if response_code == 0:
# VirusTotal database did not have a match for this hash
self.output("No information found for %s" % input_path)
if not auto_submit:
self.output(
"Consider submitting the file for analysis at https://www.virustotal.com/")
else:
if os.path.getsize(input_path) < auto_submit_max_size:
self.output("Submitting the file for analysis...")
json_data = self.submit_file(input_path, api_key)
response_code = json_data.get("response_code", None)
self.output("Response code: %s" % response_code)
verbose_msg = json_data.get("verbose_msg", None)
scan_id = json_data.get("scan_id", None)
permalink = json_data.get("permalink", None)
self.output("Message: %s" % verbose_msg)
self.output("Scan ID: %s" % scan_id)
self.output("Permalink: %s" % permalink)
else:
self.output("File is too large to submit...")
elif response_code == 1:
# VirusTotal gave us details about the file
verbose_msg = json_data.get("verbose_msg", None)
scan_id = json_data.get("scan_id", None)
num_positives = json_data.get("positives", 0)
num_total = json_data.get("total", 0)
scan_date = json_data.get("scan_date", None)
permalink = json_data.get("permalink", None)
self.output("Message: %s" % verbose_msg)
self.output("Scan ID: %s" % scan_id)
self.output("Detection ratio: %s/%s" % (num_positives, num_total))
self.output("Scan date: %s" % scan_date)
self.output("Permalink: %s" % permalink)
elif response_code == -2:
# Requested item is still queued for analysis
verbose_msg = json_data.get("verbose_msg", None)
scan_id = json_data.get("scan_id", None)
permalink = json_data.get("permalink", None)
self.output("Message: %s" % verbose_msg)
self.output("Scan ID: %s" % scan_id)
self.output("Permalink: %s" % permalink)
# Extract the information we need for the summary results
num_positives = json_data.get("positives", 0)
num_total = json_data.get("total", 0)
permalink = json_data.get("permalink", "None")
# record our time -- we use this to throttle our frequency
os.environ['AUTOPKG_VIRUSTOTAL_LAST_RUN_TIME'] = str(int(time.time()))
# Save summary result
self.env["virus_total_analyzer_summary_result"] = {
'summary_text': 'The following items were queried from the VirusTotal database:',
'report_fields': [
'name',
'ratio',
'permalink',
],
'data': {
'name': os.path.basename(input_path),
'ratio': "%s/%s" % (num_positives, num_total),
'permalink': permalink,
}
}
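def _example_detection_ratio(report):
    # Illustrative sketch, not part of the processor: condenses the JSON
    # returned by VirusTotalAnalyzer.report_for_hash() into the same
    # "positives/total" ratio string that main() stores in the summary
    # result. Only keys already read elsewhere in this file are used.
    if report.get("response_code") != 1:
        return report.get("verbose_msg", "no report available")
    return "%s/%s" % (report.get("positives", 0), report.get("total", 0))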
if __name__ == "__main__":
processor = VirusTotalAnalyzer()
processor.execute_shell()
| hjuutilainen/autopkg-virustotalanalyzer | VirusTotalAnalyzer/VirusTotalAnalyzer.py | Python | apache-2.0 | 12,577 |
import os
import subprocess
import errno
from JumpScale import j
PIPE = subprocess.PIPE
if subprocess.mswindows:
from win32file import ReadFile, WriteFile
from win32pipe import PeekNamedPipe
import msvcrt
else:
import select
import fcntl
if j.core.platformtype.myplatform.isLinux():
try:
import pxssh
except ImportError as e:
# We want this to go to stderr, otherwise applications relying on stdout
# output (build command generator scripts) are pretty busted.
#print >> sys.stderr, "Module pxssh not found...Wont be able to ssh on linux!!!"
print("cannot find pxssh")
pass
if j.core.platformtype.myplatform.isUnix():
try:
import pexpect
except ImportError as e:
print("did not find pexpect")
        if j.core.platformtype.myplatform.isLinux():
            try:
                j.sal.ubuntu.apt_install("python-pexpect")
            except:
                pass
class Popen(subprocess.Popen):
def recv(self, maxsize=None):
return self._recv('stdout', maxsize)
def recv_err(self, maxsize=None):
return self._recv('stderr', maxsize)
def send_recv(self, input='', maxsize=None):
return self.send(input), self.recv(maxsize), self.recv_err(maxsize)
def get_conn_maxsize(self, which, maxsize):
if maxsize is None:
maxsize = 1024
elif maxsize < 1:
maxsize = 1
return getattr(self, which), maxsize
def _close(self, which):
getattr(self, which).close()
setattr(self, which, None)
if subprocess.mswindows:
def send(self, input):
if not self.stdin:
return None
try:
x = msvcrt.get_osfhandle(self.stdin.fileno())
(errCode, written) = WriteFile(x, input)
except ValueError:
print("close stdin")
return self._close('stdin')
except (subprocess.pywintypes.error, Exception) as why:
if why[0] in (109, errno.ESHUTDOWN):
print("close stdin")
return self._close('stdin')
raise
return written
def _recv(self, which, maxsize):
conn, maxsize = self.get_conn_maxsize(which, maxsize)
if conn is None:
return None
try:
x = msvcrt.get_osfhandle(conn.fileno())
(read, nAvail, nMessage) = PeekNamedPipe(x, 0)
if maxsize < nAvail:
nAvail = maxsize
if nAvail > 0:
(errCode, read) = ReadFile(x, nAvail, None)
except ValueError:
return self._close(which)
except (subprocess.pywintypes.error, Exception) as why:
if why[0] in (109, errno.ESHUTDOWN):
return self._close(which)
raise
if self.universal_newlines:
read = self._translate_newlines(read)
return read
else:
def send(self, input):
if not self.stdin:
return None
if not select.select([], [self.stdin], [], 0)[1]:
return 0
try:
written = os.write(self.stdin.fileno(), input)
except OSError as why:
if why[0] == errno.EPIPE: # broken pipe
print("close stdin")
return self._close('stdin')
raise
return written
def _recv(self, which, maxsize):
conn, maxsize = self.get_conn_maxsize(which, maxsize)
if conn is None:
return None
flags = fcntl.fcntl(conn, fcntl.F_GETFL)
if not conn.closed:
fcntl.fcntl(conn, fcntl.F_SETFL, flags | os.O_NONBLOCK)
try:
if not select.select([conn], [], [], 0)[0]:
return ''
r = conn.read(maxsize)
if not r:
return self._close(which)
if self.universal_newlines:
r = self._translate_newlines(r)
return r
finally:
if not conn.closed:
fcntl.fcntl(conn, fcntl.F_SETFL, flags)
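def _example_popen_recv():
    # Illustrative sketch, not part of JumpScale: starts a shell through the
    # non-blocking Popen subclass above and polls its output with recv(),
    # which returns '' when nothing is buffered instead of blocking.
    # POSIX-only; the command name 'sh' and the half-second wait are
    # assumptions made for this example.
    import time
    p = Popen('sh', stdin=PIPE, stdout=PIPE, stderr=PIPE)
    p.send(b'echo hello\n')
    time.sleep(0.5)
    return p.recv()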
class ExpectTool:
def __init__(self):
self.__jslocation__ = "j.tools.expect"
@staticmethod
def new(cmd=None):
'''Create a new Expect session
@param cmd: Command to execute
@type cmd: string
@returns: Expect session
@rtype jumpscale.cmdline.Expect.Expect
'''
return Expect(cmd=cmd or '')
class Expect:
_p = None # popen process
error = False
_lastsend = ""
_ignoreStdError = False
_ignoreLineFilter = []
_lastOutput = "" # stdOut from last send
_lastError = "" # stdError from last send
_cleanStringEnabled = True # if True every output will be cleaned from ansi codes
_timeout = False # if true a send&wait statement has timed out
_waitTokens = [] # list of tokens where we wait on when executing
def __init__(self, cmd=""):
j.logger.addConsoleLogCategory("expect")
PIPE = subprocess.PIPE
self._prompt = ""
if not cmd:
if cmd == "" and j.core.platformtype.myplatform.isWindows():
cmd = 'cmd'
if cmd == "" and not j.core.platformtype.myplatform.isWindows():
cmd = 'sh'
self._pxssh = pxssh.pxssh()
self._p = Popen(cmd, stdin=PIPE, stdout=PIPE, stderr=PIPE)
elif cmd and cmd != 'ssh' and not j.core.platformtype.myplatform.isWindows():
self.pexpect = pexpect.spawn(cmd)
if cmd == "sh":
self.expect("#")
self.setPrompt()
self.prompt()
self.enableCleanString()
def log(self, message, category="", level=5):
category = "expect.%s" % category
category = category.strip(".")
j.logger.log(message, category=category, level=level)
def enableCleanString(self):
"""
All output will be cleaned from ANSI code and other unwanted garbage
"""
self._cleanStringEnabled = True
def disableCleanString(self):
"""
Disable output cleaning, e.g. stripping ANSI code
"""
self._cleanStringEnabled = False
def _add2lastOutput(self, str):
self._lastOutput = self._lastOutput + str
def _add2lastError(self, str):
self._lastError = self._lastError + str
def setIgnoreStdError(self):
"""
Disable display of stderr error messages to the standard output
"""
self._ignoreStdError = True
def unsetIgnoreStdError(self):
"""
Enable display error output (stderr)
"""
self._ignoreStdError = False
def addIgnoreLineFilter(self, filter):
"""
Add a filter on output lines. Lines matching the provided filter will not be displayed on stdout or stderr.
"""
self._ignoreLineFilter.append(filter)
def addWaitToken(self, token):
"""
Adds a token that we will wait for when using C{self.wait()}
"""
self._waitTokens.append(token)
def resetWaitTokens(self, token):
"""
Remove all tokens we'd wait for in self.wait()
"""
self._waitTokens = []
def clearReceive(self):
self._lastOutput = ""
self._lastError = ""
def login(self, remote, passwd, seedpasswd, initial=False, timeout=10):
# login over ssh
self.send("ssh root@%s" % remote)
if initial:
result = self.expect("continue connecting", timeout=2)
self.send("yes\n")
result = self.expect("password:", timeout=timeout)
if result == "E":
print("did not see passwd")
result = self.expect("continue connecting", timeout=timeout / 2)
if result == 0:
print("saw confirmation ssh key")
print("send yes for ssh key")
self.send("yes\n")
result = self.expect("password:", timeout=timeout / 2)
else:
raise j.exceptions.RuntimeError(
"Could not login with std passwd nor with seedpasswd, did not get passwd str")
if result != "E":
# we saw passwd
self.send(passwd)
result = self.expect("#", timeout=timeout / 2)
if result == "E":
result = self.expect("Permission denied")
if result != "E" and seedpasswd != "":
print("permission denied, will try to use seedpasswd")
self.send(seedpasswd)
result = self.expect("#")
if result == "E":
raise j.exceptions.RuntimeError("could not login with std passwd nor with seedpasswd")
print("seedpasswd worked")
print("change passwd")
self.send("passwd")
result = self.expect("password:")
if result == "E":
raise j.exceptions.RuntimeError("did not get passwd prompt.")
self.send(passwd)
result = self.expect("password:")
if result == "E":
raise j.exceptions.RuntimeError("did not get passwd prompt.")
self.send(passwd)
result = self.expect("#")
if result == "E":
raise j.exceptions.RuntimeError("could not change passwd")
return
else:
raise j.exceptions.RuntimeError("Could not login did not see permission denied.")
else:
return
if result != "E":
# we saw passwd
self.send(passwd)
result = self.expect("#")
if result == "E":
raise j.exceptions.RuntimeError("could not login")
return
else:
# did not see passwd again
raise j.exceptions.RuntimeError("Did not see passwd request, could not login")
return
# def login(self, ip, login, password, login_timeout=15):
# """Log the user into the given server
# By default the prompt is rather optimistic and should be considered more of
# an example. It is better to try to match the prompt as exactly as possible to prevent
# any false matches by server strings such as a "Message Of The Day" or something.
# The closer you can make the original_prompt match your real prompt the better.
# A timeout causes not necessarily the login to fail.
# In case of a time out we assume that the prompt was so weird that we could not match
# it. We still try to reset the prompt to something more unique.
# If that still fails then we return False.
# """
# if not j.core.platformtype.myplatform.isLinux():
# raise j.exceptions.RuntimeError('pexpect/pxssh not supported on this platform')
# if not self._pxssh.login(ip, login, password, login_timeout=login_timeout):
# raise ValueError('Could not connect to %s, check either login/password are not correct or host is not reacheable over SSH.'%ip)
# else:
# j.logger.log('SSH %s@%s session login successful' % (login, ip), 6)
def logout(self):
"""This sends exit. If there are stopped jobs then this sends exit twice.
"""
self.send('logout')
def receive(self):
"""
Receive standard out, stderror if available
return stdout,stderror
"""
if j.core.platformtype.myplatform.isWindows():
out = self.receiveOut()
err = self.receiveError()
return out, err
elif j.core.platformtype.myplatform.isUnix() and self.pexpect:
if self.pexpect.match:
# out='%s%s'%(self.pexpect.after, self.pexpect.buffer)
out = self.pexpect.before
out = self._cleanStr(out)
return out, ""
else:
before = self.pexpect.before
before = self._cleanStr(before)
return str(before), ""
elif j.core.platformtype.myplatform.isLinux() and not self.pexpect:
            return str(self._pxssh.before), ""
        j.errorconditionhandler.raiseBug(
            message="should never come here, unsupported platform", category="expect.receive")
def receivePrint(self):
"""
Receive data from stdout and stderr and displays them
This function also remembers this information for later usage in the
classes C{_out} & C{_error}.
"""
out, err = self.receive()
print(out)
if err != "":
print("ERROR:")
print(err)
def _receiveOut(self): # windows only
"""
Receive standard out and return. This information is stored for later usage
in the class C{_out}.
"""
out = self._receive(False)
if self._cleanStringEnabled:
out = self._cleanStr(out)
self._add2lastOutput(out)
j.logger.log("stdout:%s" % out, 9)
return out
# TODO: P2 not right,can never work, needs to check if expect or popen or, ...
def _receiveError(self): # windows only
"""
Receive standard error and return. This information is stored for later usage
in the class C{_error}.
"""
err = self._receive(True)
if self._cleanStringEnabled:
err = self._cleanStr(err)
self._add2lastError(err)
return err
# TODO: P2 not right,can never work, needs to check if expect or popen or, ...
def pprint(self):
"""
Print the result of all send & receive operations till now on local C{stdout}.
"""
out = self._ignoreLinesBasedOnFilter(self._lastOutput)
error = self._lastError
if(error != ""):
j.tools.console.echo("%s/nerror:%s" % (out, error))
else:
j.tools.console.echo(out)
# def _receive(self,checkError=False):
# #stdin=self._stdin
# #stdout=self._stdout
# t=.1
# e=1
# tr=5
# p=self._p
# if tr < 1:
# tr = 1
# x = time.time()+t
# y = []
# r = ''
# pr = p.recv
# #check error
# if checkError:
# pr = p.recv_err
# while time.time() < x or r:
# r = pr()
# if r is None:
# if e:
# raise Exception("Exception occured")
# else:
# break
# elif r:
# y.append(r)
# else:
# time.sleep(max((x-time.time())/tr, 0))
# returnval=''.join(y)
# returnval=returnval.replace("\\n","\n")
# returnval=returnval.replace("\\r","\r")
# returnval=self._cleanStr(returnval)
# if returnval != "" and checkError:
# self.error=True
# return returnval
def _cleanStr(self, s):
"""
Remove most ANSI characters (screen emulation).
Remove double prompts (if used e.g. with remote ssh).
"""
state = "start"
# s=s.encode('ascii')
strclean = ""
# s=s.replace(unichr(27)+"]0;","")
s = self._strRemovePromptSSHAnsi(s)
for item in s:
if self._ansiCheckStart(item):
state = "ignore"
teller = 0
if state != "ignore":
strclean = strclean + item
if state == "ignore" and self._ansiCheckStop(item):
state = "ok"
strclean = strclean.replace(chr(27) + chr(7), "")
strclean = strclean.replace(chr(27) + chr(8), "")
strclean = strclean.replace(chr(7), "")
return strclean
def _strRemovePromptSSHAnsi(self, s):
state = "start"
strclean = ""
for t in range(0, len(s)):
if t + 3 < len(s):
find = s[t] + s[t + 1] + s[t + 2] + s[t + 3]
else:
find = ""
if find == chr(27) + "]0;":
# found prompt
state = "ignore"
if state != "ignore":
strclean = strclean + s[t]
if state == "ignore" and s[t] == chr(7):
state = "ok"
return strclean
def _ansiCheckStart(self, s):
pattern = [27]
found = False
for item in pattern:
if ord(s) == item:
found = True
return found
def _ansiCheckStop(self, s):
pattern = "cnRhlL()HABCDfsurMHgKJipm"
found = False
for item in pattern:
if ord(s) == ord(item):
found = True
return found
def send(self, data="", newline=True):
"""
Send a command to shell.
After sending a command, one of the receive functions must be called to
check for the result on C{stdout} or C{stderr}.
"""
self.log("send: %s" % data, category="send")
self._lastsend = data
self._lastOutput = ""
self._lastError = ""
if j.core.platformtype.myplatform.isUnix():
if self.pexpect:
if newline:
data = data.rstrip("\n")
return self.pexpect.sendline(data)
else:
return self.pexpect.send(data)
if j.core.platformtype.myplatform.isWindows():
data = data + "\r\n"
p = self._p
if len(data) != 0:
if j.core.platformtype.myplatform.isWindows():
sent = p.send(data)
if sent is None:
raise Exception("ERROR: Data sent is none")
data = buffer(data, sent)
elif j.core.platformtype.myplatform.isLinux():
self._pxssh.sendline(data)
# def read(self):
# o=self.pexpect.read_nonblocking()
# out=""
# while o != "":
# print o,
# o=self.pexpect.read_nonblocking()
# out+=o
# return out
def setPrompt(self, prompt="#.#.#"):
self.send("PS1='%s'" % prompt)
self._prompt = prompt
self.prompt()
def executeSequence(self, sequence, cmd):
"""
        sequence = [[regex1, tosend, stepname, timeout], ...]
        timeout and stepname are optional
        at the end it waits for the prompt
"""
self.send(cmd)
out = ""
m = len(sequence)
nr = 0
for item in sequence:
nr += 1
if len(item) == 2:
regex = item[0]
tosend = item[1]
stepname = nr
timeout = 10
elif len(item) == 3:
regex = item[0]
tosend = item[1]
stepname = item[2]
timeout = 10
elif len(item) == 4:
regex = item[0]
tosend = item[1]
stepname = item[2]
timeout = item[3]
else:
raise j.exceptions.RuntimeError("Error in syntax sequence,\n%s" % sequence)
result = self.expect([regex, self._prompt], timeout=timeout)
if result == 0 or nr == m:
o = self.receive()[0]
o += "\nSTEP: %s: %s\n%s\n" % (nr, stepname, o)
out += "%s\n" % o
print(o)
self.send(tosend, False)
elif result is False:
raise j.exceptions.RuntimeError("Timeout in execution of sequence.\nError:\n%s" % o)
else:
raise j.exceptions.RuntimeError("Error in execution of sequence.\nError:\n%s" % o)
return self.prompt()
def prompt(self, timeout=5):
"""Expect the prompt.
Return C{True} if the prompt was matched.
Returns C{False} if there was a time out.
"""
        return self.expect(self._prompt, timeout=timeout) != "E"
def _removeFirstLine(self, text):
lines = text.splitlines()
linenr = 0
cleanstr = ""
for line in lines:
linenr = linenr + 1
if(linenr != 1):
cleanstr = cleanstr + line + "\n"
return cleanstr
def execShellCmd(self, cmd, timeout=30):
"""
execute a command and wait on the prompt
"""
self.send(cmd)
self.prompt(timeout=timeout)
out, err = self.receive()
return out
def do(self, data, timeout=30):
"""
This function is a combination of the functions C{send}, C{receive} and C{print}.
The first line is also removed (this is the echo from what has been sent).
Use this if you quickly want to execute something from the command line.
"""
self.send(data)
self.wait(timeout)
self._lastOutput = self._removeFirstLine(self._lastOutput)
self.pprint()
# def waitTillEnd(self):
# """
# TODO: not clear what it does anw why needed
# """
# self._p.wait()
def _checkForTokens(self, text):
if text == "":
return 0
text = text.lower()
tokens = self._waitTokens
tokennr = 0
for token in tokens:
#j.logger.log("checktoken %s : %s" % (token,text))
tokennr = tokennr + 1
token = token.lower()
if text.find(token) != -1:
# token found
j.logger.log("Found token:%s" % token, 9)
return tokennr
return 0
def _ignoreLinesBasedOnFilter(self, str):
lines = str.splitlines()
returnstr = ""
for line in lines:
foundmatch = False
for filter in self._ignoreLineFilter:
# print line
# print filter
if line.find(filter) != -1:
j.logger.log("Found ignore line:%s:%s" % (filter, line), 9)
foundmatch = True
if foundmatch is False:
returnstr = returnstr + line + "\n"
return returnstr
def wait(self, timeoutval=30):
"""
Wait until we detect tokens (see L{addWaitToken})
@param timeoutval: time in seconds we maximum will wait
"""
self.log("wait: %s sec" % timeoutval, category="wait")
timeout = False
starttime = j.data.time.getTimeEpoch()
r = "" # full return
returnpart = "" # one time return after receive
done = False # status param
tokenfound = 0
self._timeout = False
while(timeout is False and done is False):
returnpart, err = self.receive()
print(returnpart)
tokenfound = self._checkForTokens(returnpart)
# j.logger.log("tokenfound:%s"%tokenfound)
returnpart = self._ignoreLinesBasedOnFilter(returnpart)
r = r + returnpart
curtime = j.data.time.getTimeEpoch()
j.logger.log("TimeoutCheck on waitreceive: %s %s %s" % (curtime, starttime, timeoutval), 8)
if(curtime - starttime > timeoutval):
j.logger.log("WARNING: execute %s timed out (timeout was %s)" % (self._lastsend, timeoutval), 6)
timeout = True
if tokenfound > 0:
done = True
out, err = self.receive()
r = r + out
if timeout:
r = ""
self._timeout = True
return tokenfound, r, timeout
def expect(self, outputToExpect, timeout=2):
"""
Pexpect expect method wrapper
        usage: when executing a command that expects user input, this method can be used to
        expect the question asked and then send the answer
        Example:
            Expect = j.tools.expect.new('passwd')
            if Expect.expect('Enter new') != 'E':
                Expect.send('newPasswd')
            if Expect.expect('Retype new') != 'E':
                Expect.send('anotherPasswd')
            if Expect.expect('passwords do not match') != 'E':
                j.tools.console.echo(Expect.receive())
            else:
                j.tools.console.echo(Expect.receive())
        @return the index of the matched pattern, or 'E' on error/timeout
"""
j.logger.log('Expect %s ' % outputToExpect, 7)
try:
result = self.pexpect.expect(outputToExpect, timeout=timeout)
return result
except:
msg = 'Failed to expect \"%s\", found \"%s\" instead' % (outputToExpect, self.receive())
# print msg
j.logger.log(msg, 7)
return "E"
| Jumpscale/jumpscale_core8 | lib/JumpScale/tools/expect/Expect.py | Python | apache-2.0 | 25,028 |
#
# Copyright 2013 Quantopian, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
class ZiplineError(Exception):
msg = None
def __init__(self, *args, **kwargs):
self.args = args
self.kwargs = kwargs
self.message = str(self)
def __str__(self):
msg = self.msg.format(**self.kwargs)
return msg
__unicode__ = __str__
__repr__ = __str__
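def _example_error_message():
    # Illustrative sketch, not part of zipline: shows how the subclasses
    # below only need to define a `msg` template; ZiplineError.__str__
    # formats it with the keyword arguments passed when the error is raised.
    class ExampleError(ZiplineError):
        msg = "Cannot order {amount} shares of {asset}."
    return str(ExampleError(amount=10, asset="AAPL"))
    # -> 'Cannot order 10 shares of AAPL.'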
class WrongDataForTransform(ZiplineError):
"""
Raised whenever a rolling transform is called on an event that
does not have the necessary properties.
"""
msg = "{transform} requires {fields}. Event cannot be processed."
class UnsupportedSlippageModel(ZiplineError):
"""
Raised if a user script calls the override_slippage magic
    with a slippage object that isn't a VolumeShareSlippage or
    FixedSlippage.
"""
msg = """
You attempted to override slippage with an unsupported class. \
Please use VolumeShareSlippage or FixedSlippage.
""".strip()
class OverrideSlippagePostInit(ZiplineError):
    # Raised if a user's script calls the override_slippage magic
# after the initialize method has returned.
msg = """
You attempted to override slippage outside of `initialize`. \
You may only call override_slippage in your initialize method.
""".strip()
class RegisterTradingControlPostInit(ZiplineError):
    # Raised if a user's script registers a trading control after initialize
# has been run.
msg = """
You attempted to set a trading control outside of `initialize`. \
Trading controls may only be set in your initialize method.
""".strip()
class RegisterAccountControlPostInit(ZiplineError):
    # Raised if a user's script registers an account control after initialize
    # has been run.
msg = """
You attempted to set an account control outside of `initialize`. \
Account controls may only be set in your initialize method.
""".strip()
class UnsupportedCommissionModel(ZiplineError):
"""
Raised if a user script calls the override_commission magic
with a commission object that isn't a PerShare, PerTrade or
PerDollar commission
"""
msg = """
You attempted to override commission with an unsupported class. \
Please use PerShare or PerTrade.
""".strip()
class OverrideCommissionPostInit(ZiplineError):
"""
    Raised if a user's script calls the override_commission magic
after the initialize method has returned.
"""
msg = """
You attempted to override commission outside of `initialize`. \
You may only call override_commission in your initialize method.
""".strip()
class TransactionWithNoVolume(ZiplineError):
"""
Raised if a transact call returns a transaction with zero volume.
"""
msg = """
Transaction {txn} has a volume of zero.
""".strip()
class TransactionWithWrongDirection(ZiplineError):
"""
Raised if a transact call returns a transaction with a direction that
does not match the order.
"""
msg = """
Transaction {txn} not in same direction as corresponding order {order}.
""".strip()
class TransactionWithNoAmount(ZiplineError):
"""
Raised if a transact call returns a transaction with zero amount.
"""
msg = """
Transaction {txn} has an amount of zero.
""".strip()
class TransactionVolumeExceedsOrder(ZiplineError):
"""
Raised if a transact call returns a transaction with a volume greater than
the corresponding order.
"""
msg = """
Transaction volume of {txn} exceeds the order volume of {order}.
""".strip()
class UnsupportedOrderParameters(ZiplineError):
"""
Raised if a set of mutually exclusive parameters are passed to an order
call.
"""
msg = "{msg}"
class BadOrderParameters(ZiplineError):
"""
Raised if any impossible parameters (nan, negative limit/stop)
are passed to an order call.
"""
msg = "{msg}"
class OrderDuringInitialize(ZiplineError):
"""
Raised if order is called during initialize()
"""
msg = "{msg}"
class AccountControlViolation(ZiplineError):
"""
Raised if the account violates a constraint set by a AccountControl.
"""
msg = """
Account violates account constraint {constraint}.
""".strip()
class TradingControlViolation(ZiplineError):
"""
Raised if an order would violate a constraint set by a TradingControl.
"""
msg = """
Order for {amount} shares of {asset} at {datetime} violates trading constraint
{constraint}.
""".strip()
class IncompatibleHistoryFrequency(ZiplineError):
"""
Raised when a frequency is given to history which is not supported.
At least, not yet.
"""
msg = """
Requested history at frequency '{frequency}' cannot be created with data
at frequency '{data_frequency}'.
""".strip()
class HistoryInInitialize(ZiplineError):
"""
Raised when an algorithm calls history() in initialize.
"""
msg = "history() should only be called in handle_data()"
class MultipleSymbolsFound(ZiplineError):
"""
Raised when a symbol() call contains a symbol that changed over
time and is thus not resolvable without additional information
provided via as_of_date.
"""
msg = """
Multiple symbols with the name '{symbol}' found. Use the
    'as_of_date' argument to specify when the symbol lookup
should be valid.
Possible options:{options}
""".strip()
class SymbolNotFound(ZiplineError):
"""
    Raised when a symbol() call contains a non-existent symbol.
"""
msg = """
Symbol '{symbol}' was not found.
""".strip()
class RootSymbolNotFound(ZiplineError):
"""
    Raised when a lookup_future_chain() call contains a non-existent symbol.
"""
msg = """
Root symbol '{root_symbol}' was not found.
""".strip()
class SidNotFound(ZiplineError):
"""
Raised when a retrieve_asset() call contains a non-existent sid.
"""
msg = """
Asset with sid '{sid}' was not found.
""".strip()
class ConsumeAssetMetaDataError(ZiplineError):
"""
Raised when AssetFinder.consume() is called on an invalid object.
"""
msg = """
AssetFinder can not consume metadata of type {obj}. Metadata must be a dict, a
DataFrame, or a tables.Table. If the provided metadata is a Table, the rows
must contain both or one of 'sid' or 'symbol'.
""".strip()
class MapAssetIdentifierIndexError(ZiplineError):
"""
Raised when AssetMetaData.map_identifier_index_to_sids() is called on an
index of invalid objects.
"""
msg = """
AssetFinder can not map an index with values of type {obj}. Asset indices of
DataFrames or Panels must be integer sids, string symbols, or Asset objects.
""".strip()
class SidAssignmentError(ZiplineError):
"""
Raised when an AssetFinder tries to build an Asset that does not have a sid
and that AssetFinder is not permitted to assign sids.
"""
msg = """
AssetFinder metadata is missing a SID for identifier '{identifier}'.
""".strip()
class NoSourceError(ZiplineError):
"""
Raised when no source is given to the pipeline
"""
msg = """
No data source given.
""".strip()
class PipelineDateError(ZiplineError):
"""
Raised when only one date is passed to the pipeline
"""
msg = """
Only one simulation date given. Please specify both the 'start' and 'end' for
the simulation, or neither. If neither is given, the start and end of the
DataSource will be used. Given start = '{start}', end = '{end}'
""".strip()
class WindowLengthTooLong(ZiplineError):
"""
Raised when a trailing window is instantiated with a lookback greater than
the length of the underlying array.
"""
msg = (
"Can't construct a rolling window of length "
"{window_length} on an array of length {nrows}."
).strip()
class WindowLengthNotPositive(ZiplineError):
"""
Raised when a trailing window would be instantiated with a length less than
1.
"""
msg = (
"Expected a window_length greater than 0, got {window_length}."
).strip()
class InputTermNotAtomic(ZiplineError):
"""
Raised when a non-atomic term is specified as an input to a Pipeline API
term with a lookback window.
"""
msg = (
"Can't compute {parent} with non-atomic input {child}."
)
class TermInputsNotSpecified(ZiplineError):
"""
Raised if a user attempts to construct a term without specifying inputs and
that term does not have class-level default inputs.
"""
msg = "{termname} requires inputs, but no inputs list was passed."
class WindowLengthNotSpecified(ZiplineError):
"""
    Raised if a user attempts to construct a term without specifying a
    window_length and that term does not have a class-level default
    window_length.
"""
msg = (
"{termname} requires a window_length, but no window_length was passed."
)
class DTypeNotSpecified(ZiplineError):
"""
Raised if a user attempts to construct a term without specifying dtype and
that term does not have class-level default dtype.
"""
msg = (
"{termname} requires a dtype, but no dtype was passed."
)
class BadPercentileBounds(ZiplineError):
"""
Raised by API functions accepting percentile bounds when the passed bounds
are invalid.
"""
msg = (
"Percentile bounds must fall between 0.0 and 100.0, and min must be "
"less than max."
"\nInputs were min={min_percentile}, max={max_percentile}."
)
class UnknownRankMethod(ZiplineError):
"""
Raised during construction of a Rank factor when supplied a bad Rank
method.
"""
msg = (
"Unknown ranking method: '{method}'. "
"`method` must be one of {choices}"
)
class AttachPipelineAfterInitialize(ZiplineError):
"""
    Raised when a user tries to call attach_pipeline outside of initialize.
"""
msg = (
"Attempted to attach a pipeline after initialize()."
"attach_pipeline() can only be called during initialize."
)
class PipelineOutputDuringInitialize(ZiplineError):
"""
Raised when a user tries to call `pipeline_output` during initialize.
"""
msg = (
"Attempted to call pipeline_output() during initialize. "
"pipeline_output() can only be called once initialize has completed."
)
class NoSuchPipeline(ZiplineError, KeyError):
"""
Raised when a user tries to access a non-existent pipeline by name.
"""
msg = (
"No pipeline named '{name}' exists. Valid pipeline names are {valid}. "
"Did you forget to call attach_pipeline()?"
)
class UnsupportedDataType(ZiplineError):
"""
Raised by CustomFactors with unsupported dtypes.
"""
msg = "CustomFactors with dtype {dtype} are not supported."
class NoFurtherDataError(ZiplineError):
"""
Raised by calendar operations that would ask for dates beyond the extent of
our known data.
"""
# This accepts an arbitrary message string because it's used in more places
    # than can be usefully templated.
msg = '{msg}'
class UnsupportedDatetimeFormat(ZiplineError):
"""
Raised when an unsupported datetime is passed to an API method.
"""
msg = ("The input '{input}' passed to '{method}' is not "
"coercible to a pandas.Timestamp object.")
class PositionTrackerMissingAssetFinder(ZiplineError):
"""
Raised by a PositionTracker if it is asked to update an Asset but does not
have an AssetFinder
"""
msg = (
"PositionTracker attempted to update its Asset information but does "
"not have an AssetFinder. This may be caused by a failure to properly "
"de-serialize a TradingAlgorithm."
)
| jimgoo/zipline-fork | zipline/errors.py | Python | apache-2.0 | 12,167 |
# Copyright 2011 Chris Davis
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#
# Ceres requires Python 2.7 or newer
import itertools
import os
import struct
import json
import errno
from math import isnan
from os.path import isdir, exists, join, dirname, abspath, getsize, getmtime
from glob import glob
from bisect import bisect_left
izip = getattr(itertools, 'izip', zip)
try:
import fcntl
CAN_LOCK = True
except ImportError:
CAN_LOCK = False
LOCK_WRITES = False
TIMESTAMP_FORMAT = "!L"
TIMESTAMP_SIZE = struct.calcsize(TIMESTAMP_FORMAT)
DATAPOINT_FORMAT = "!d"
DATAPOINT_SIZE = struct.calcsize(DATAPOINT_FORMAT)
NAN = float('nan')
PACKED_NAN = struct.pack(DATAPOINT_FORMAT, NAN)
MAX_SLICE_GAP = 80
DEFAULT_TIMESTEP = 60
DEFAULT_NODE_CACHING_BEHAVIOR = 'all'
DEFAULT_SLICE_CACHING_BEHAVIOR = 'none'
SLICE_AGGREGATION_METHODS = ['average', 'sum', 'last', 'max', 'min']
SLICE_PERMS = 0o644
DIR_PERMS = 0o755
class CeresTree(object):
"""Represents a tree of Ceres metrics contained within a single path on disk
This is the primary Ceres API.
:param root: The directory root of the Ceres tree
.. note:: Use :func:`createTree` to initialize and instantiate a new CeresTree
.. seealso:: :func:`setDefaultNodeCachingBehavior` to adjust caching behavior
"""
def __init__(self, root):
if isdir(root):
self.root = abspath(root)
else:
raise ValueError("Invalid root directory '%s'" % root)
self.nodeCache = {}
self.nodeCachingBehavior = DEFAULT_NODE_CACHING_BEHAVIOR
def __repr__(self):
return "<CeresTree[0x%x]: %s>" % (id(self), self.root)
__str__ = __repr__
@classmethod
def createTree(cls, root, **props):
"""Create and returns a new Ceres tree with the given properties
:param root: The root directory of the new Ceres tree
:param \*\*props: Arbitrary key-value properties to store as tree metadata
:returns: :class:`CeresTree`
"""
ceresDir = join(root, '.ceres-tree')
if not isdir(ceresDir):
os.makedirs(ceresDir, DIR_PERMS)
for prop, value in props.items():
propFile = join(ceresDir, prop)
with open(propFile, 'w') as fh:
fh.write(str(value))
return cls(root)
def walk(self, **kwargs):
"""Iterate through the nodes contained in this :class:`CeresTree`
:param \*\*kwargs: Options to pass to :func:`os.walk`
:returns: An iterator yielding :class:`CeresNode` objects
"""
for (fsPath, subdirs, filenames) in os.walk(self.root, **kwargs):
if CeresNode.isNodeDir(fsPath):
nodePath = self.getNodePath(fsPath)
yield CeresNode(self, nodePath, fsPath)
def getFilesystemPath(self, nodePath):
"""Get the on-disk path of a Ceres node given a metric name
:param nodePath: A metric name e.g. ``carbon.agents.graphite-a.cpuUsage``
:returns: The Ceres node path on disk"""
return join(self.root, nodePath.replace('.', os.sep))
def getNodePath(self, fsPath):
"""Get the metric name of a Ceres node given the on-disk path
:param fsPath: The filesystem path of a Ceres node
:returns: A metric name
:raises ValueError: When `fsPath` is not a path within the :class:`CeresTree`
"""
fsPath = abspath(fsPath)
if not fsPath.startswith(self.root):
raise ValueError("path '%s' not beneath tree root '%s'" % (fsPath, self.root))
nodePath = fsPath[len(self.root):].strip(os.sep).replace(os.sep, '.')
return nodePath
def hasNode(self, nodePath):
"""Returns whether the Ceres tree contains the given metric
:param nodePath: A metric name e.g. ``carbon.agents.graphite-a.cpuUsage``
:returns: `True` or `False`"""
return isdir(self.getFilesystemPath(nodePath))
def setNodeCachingBehavior(self, behavior):
"""Set node caching behavior.
:param behavior: See :func:`getNode` for valid behavior values
"""
behavior = behavior.lower()
if behavior not in ('none', 'all'):
raise ValueError("invalid caching behavior '%s'" % behavior)
self.nodeCachingBehavior = behavior
self.nodeCache = {}
def getNode(self, nodePath):
"""Returns a Ceres node given a metric name. Because nodes are looked up in
every read and write, a caching mechanism is provided. Cache behavior is set
using :func:`setNodeCachingBehavior` and defaults to the value set in
``DEFAULT_NODE_CACHING_BEHAVIOR``
The following behaviors are available:
* `none` - Node is read from the filesystem at every access.
* `all` (default) - All nodes are cached.
:param nodePath: A metric name
:returns: :class:`CeresNode` or `None`
"""
if self.nodeCachingBehavior == 'all':
if nodePath not in self.nodeCache:
fsPath = self.getFilesystemPath(nodePath)
if CeresNode.isNodeDir(fsPath):
self.nodeCache[nodePath] = CeresNode(self, nodePath, fsPath)
else:
return None
return self.nodeCache[nodePath]
elif self.nodeCachingBehavior == 'none':
fsPath = self.getFilesystemPath(nodePath)
if CeresNode.isNodeDir(fsPath):
return CeresNode(self, nodePath, fsPath)
else:
return None
else:
raise ValueError("invalid caching behavior configured '%s'" % self.nodeCachingBehavior)
def find(self, nodePattern, fromTime=None, untilTime=None):
"""Find nodes which match a wildcard pattern, optionally filtering on
a time range
:param nodePattern: A glob-style metric wildcard
:param fromTime: Optional interval start time in unix-epoch.
:param untilTime: Optional interval end time in unix-epoch.
:returns: An iterator yielding :class:`CeresNode` objects
"""
for fsPath in glob(self.getFilesystemPath(nodePattern)):
if CeresNode.isNodeDir(fsPath):
nodePath = self.getNodePath(fsPath)
node = self.getNode(nodePath)
if fromTime is None and untilTime is None:
yield node
elif node.hasDataForInterval(fromTime, untilTime):
yield node
def createNode(self, nodePath, **properties):
"""Creates a new metric given a new metric name and optional per-node metadata
:param nodePath: The new metric name.
:param \*\*properties: Arbitrary key-value properties to store as metric metadata.
:returns: :class:`CeresNode`
"""
return CeresNode.create(self, nodePath, **properties)
def store(self, nodePath, datapoints):
"""Store a list of datapoints associated with a metric
:param nodePath: The metric name to write to e.g. ``carbon.agents.graphite-a.cpuUsage``
:param datapoints: A list of datapoint tuples: ``[(timestamp, value), ...]``
"""
node = self.getNode(nodePath)
if node is None:
raise NodeNotFound("The node '%s' does not exist in this tree" % nodePath)
node.write(datapoints)
def fetch(self, nodePath, fromTime, untilTime):
"""Fetch data within a given interval from the given metric
:param nodePath: The metric name to fetch from
:param fromTime: Requested interval start time in unix-epoch.
:param untilTime: Requested interval end time in unix-epoch.
:returns: :class:`TimeSeriesData`
:raises: :class:`NodeNotFound`, :class:`InvalidRequest`
"""
node = self.getNode(nodePath)
if not node:
raise NodeNotFound("the node '%s' does not exist in this tree" % nodePath)
return node.read(fromTime, untilTime)
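def _example_tree_roundtrip():
  # Illustrative sketch, not part of the library: exercises the primary
  # CeresTree API documented above -- create a tree, create a node, store
  # datapoints and fetch them back. The metric name and the throw-away
  # temporary directory are assumptions made for this example.
  import tempfile
  import time
  tree = CeresTree.createTree(tempfile.mkdtemp())
  tree.createNode('carbon.example.cpuUsage', timeStep=60)
  now = int(time.time())
  tree.store('carbon.example.cpuUsage', [(now, 42.0)])
  return tree.fetch('carbon.example.cpuUsage', now - 300, now + 60)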
class CeresNode(object):
"""A :class:`CeresNode` represents a single time-series metric of a given `timeStep`
(its seconds-per-point resolution) and containing arbitrary key-value metadata.
A :class:`CeresNode` is associated with its most precise `timeStep`. This `timeStep` is the finest
resolution that can be used for writing, though a :class:`CeresNode` can contain and read data with
other, less-precise `timeStep` values in its underlying :class:`CeresSlice` data.
:param tree: The :class:`CeresTree` this node is associated with
:param nodePath: The name of the metric this node represents
:param fsPath: The filesystem path of this metric
.. note:: This class generally should be instantiated through use of :class:`CeresTree`. See
:func:`CeresTree.createNode` and :func:`CeresTree.getNode`
.. seealso:: :func:`setDefaultSliceCachingBehavior` to adjust caching behavior
"""
__slots__ = ('tree', 'nodePath', 'fsPath',
'metadataFile', 'timeStep', 'aggregationMethod',
'sliceCache', 'sliceCachingBehavior')
def __init__(self, tree, nodePath, fsPath):
self.tree = tree
self.nodePath = nodePath
self.fsPath = fsPath
self.metadataFile = join(fsPath, '.ceres-node')
self.timeStep = None
self.aggregationMethod = 'average'
self.sliceCache = None
self.sliceCachingBehavior = DEFAULT_SLICE_CACHING_BEHAVIOR
def __repr__(self):
return "<CeresNode[0x%x]: %s>" % (id(self), self.nodePath)
__str__ = __repr__
@classmethod
def create(cls, tree, nodePath, **properties):
"""Create a new :class:`CeresNode` on disk with the specified properties.
:param tree: The :class:`CeresTree` this node is associated with
:param nodePath: The name of the metric this node represents
:param \*\*properties: A set of key-value properties to be associated with this node
A :class:`CeresNode` always has the `timeStep` property which is an integer value representing
the precision of the node in seconds-per-datapoint. E.g. a value of ``60`` represents one datapoint
per minute. If no `timeStep` is specified at creation, the value of ``ceres.DEFAULT_TIMESTEP`` is
used
:returns: :class:`CeresNode`
"""
# Create the node directory
fsPath = tree.getFilesystemPath(nodePath)
os.makedirs(fsPath, DIR_PERMS)
properties['timeStep'] = properties.get('timeStep', DEFAULT_TIMESTEP)
# Create the initial metadata
node = cls(tree, nodePath, fsPath)
node.writeMetadata(properties)
# Create the initial data file
# timeStep = properties['timeStep']
# now = int( time.time() )
# baseTime = now - (now % timeStep)
# slice = CeresSlice.create(node, baseTime, timeStep)
return node
@staticmethod
def isNodeDir(path):
"""Tests whether the given path is a :class:`CeresNode`
:param path: Path to test
    :returns: `True` or `False`
"""
return isdir(path) and exists(join(path, '.ceres-node'))
@classmethod
def fromFilesystemPath(cls, fsPath):
"""Instantiate a :class:`CeresNode` from the on-disk path of an existing node
    :param fsPath: The filesystem path of an existing node
:returns: :class:`CeresNode`
"""
dirPath = dirname(fsPath)
while True:
ceresDir = join(dirPath, '.ceres-tree')
if isdir(ceresDir):
tree = CeresTree(dirPath)
nodePath = tree.getNodePath(fsPath)
return cls(tree, nodePath, fsPath)
dirPath = dirname(dirPath)
if dirPath == '/':
raise ValueError("the path '%s' is not in a ceres tree" % fsPath)
@property
def slice_info(self):
"""A property providing a list of current information about each slice
:returns: ``[(startTime, endTime, timeStep), ...]``
"""
return [(slice.startTime, slice.endTime, slice.timeStep) for slice in self.slices]
def readMetadata(self):
"""Update node metadata from disk
:raises: :class:`CorruptNode`
"""
with open(self.metadataFile, 'r') as fh:
try:
metadata = json.load(fh)
self.timeStep = int(metadata['timeStep'])
if metadata.get('aggregationMethod'):
self.aggregationMethod = metadata['aggregationMethod']
return metadata
except (KeyError, IOError, ValueError) as e:
raise CorruptNode(self, "Unable to parse node metadata: %s" % e.args)
def writeMetadata(self, metadata):
"""Writes new metadata to disk
:param metadata: a JSON-serializable dict of node metadata
"""
self.timeStep = int(metadata['timeStep'])
with open(self.metadataFile, 'w') as fh:
json.dump(metadata, fh)
@property
def slices(self):
"""A property providing access to information about this node's underlying slices. Because this
information is accessed in every read and write, a caching mechanism is provided. Cache behavior is
set using :func:`setSliceCachingBehavior` and defaults to the value set in
``DEFAULT_SLICE_CACHING_BEHAVIOR``
The following behaviors are available:
* `none` (default) - Slice information is read from the filesystem at every access
* `latest` - The latest slice is served from cache, all others from disk. Reads and writes of recent
data are most likely to be in the latest slice
* `all` - All slices are cached. The cache is only refreshed on new slice creation or deletion
    :returns: An iterator yielding :class:`CeresSlice` objects
"""
if self.sliceCache:
if self.sliceCachingBehavior == 'all':
for slice in self.sliceCache:
yield slice
elif self.sliceCachingBehavior == 'latest':
yield self.sliceCache
infos = self.readSlices()
for info in infos[1:]:
yield CeresSlice(self, *info)
else:
if self.sliceCachingBehavior == 'all':
self.sliceCache = [CeresSlice(self, *info) for info in self.readSlices()]
for slice in self.sliceCache:
yield slice
elif self.sliceCachingBehavior == 'latest':
infos = self.readSlices()
if infos:
self.sliceCache = CeresSlice(self, *infos[0])
yield self.sliceCache
for info in infos[1:]:
yield CeresSlice(self, *info)
elif self.sliceCachingBehavior == 'none':
for info in self.readSlices():
yield CeresSlice(self, *info)
else:
raise ValueError("invalid caching behavior configured '%s'" % self.sliceCachingBehavior)
def readSlices(self):
"""Read slice information from disk
:returns: ``[(startTime, timeStep), ...]``
"""
if not exists(self.fsPath):
raise NodeDeleted()
slice_info = []
for filename in os.listdir(self.fsPath):
if filename.endswith('.slice'):
startTime, timeStep = filename[:-6].split('@')
slice_info.append((int(startTime), int(timeStep)))
slice_info.sort(reverse=True)
return slice_info
def setSliceCachingBehavior(self, behavior):
"""Set slice caching behavior.
:param behavior: See :func:`slices` for valid behavior values
"""
behavior = behavior.lower()
if behavior not in ('none', 'all', 'latest'):
raise ValueError("invalid caching behavior '%s'" % behavior)
self.sliceCachingBehavior = behavior
self.sliceCache = None
def clearSliceCache(self):
"""Clear slice cache, forcing a refresh from disk at the next access"""
self.sliceCache = None
def hasDataForInterval(self, fromTime, untilTime):
"""Test whether this node has any data in the given time interval. All slices are inspected
which will trigger a read of slice information from disk if slice cache behavior is set to `latest`
or `none` (See :func:`slices`)
:param fromTime: Beginning of interval in unix epoch seconds
:param untilTime: End of interval in unix epoch seconds
    :returns: `True` or `False`
"""
slices = list(self.slices)
if not slices:
return False
earliestData = slices[-1].startTime
latestData = slices[0].endTime
return ((fromTime is None) or (fromTime < latestData)) and \
((untilTime is None) or (untilTime > earliestData))
def read(self, fromTime, untilTime):
"""Read data from underlying slices and return as a single time-series
:param fromTime: Beginning of interval in unix epoch seconds
:param untilTime: End of interval in unix epoch seconds
:returns: :class:`TimeSeriesData`
"""
if self.timeStep is None:
self.readMetadata()
# Normalize the timestamps to fit proper intervals
fromTime = int(fromTime - (fromTime % self.timeStep))
untilTime = int(untilTime - (untilTime % self.timeStep))
sliceBoundary = None # to know when to split up queries across slices
resultValues = []
earliestData = None
timeStep = self.timeStep
method = self.aggregationMethod
for slice in self.slices:
# If there was a prior slice covering the requested interval, dont ask for that data again
if (sliceBoundary is not None) and untilTime > sliceBoundary:
requestUntilTime = sliceBoundary
else:
requestUntilTime = untilTime
# if the requested interval starts after the start of this slice
if fromTime >= slice.startTime:
try:
series = slice.read(fromTime, requestUntilTime)
except NoData:
break
if series.timeStep != timeStep:
if len(resultValues) == 0:
# First slice holding series data, this becomes the default timeStep.
timeStep = series.timeStep
elif series.timeStep < timeStep:
# Series is at a different precision, aggregate to fit our current set.
series.values = aggregateSeries(method, series.timeStep, timeStep, series.values)
else:
# Normalize current set to fit new series data.
resultValues = aggregateSeries(method, timeStep, series.timeStep, resultValues)
timeStep = series.timeStep
earliestData = series.startTime
rightMissing = (requestUntilTime - series.endTime) // timeStep
rightNulls = [None for i in range(rightMissing)]
resultValues = series.values + rightNulls + resultValues
break
# or if slice contains data for part of the requested interval
elif untilTime >= slice.startTime:
try:
series = slice.read(slice.startTime, requestUntilTime)
except NoData:
continue
if series.timeStep != timeStep:
if len(resultValues) == 0:
# First slice holding series data, this becomes the default timeStep.
timeStep = series.timeStep
elif series.timeStep < timeStep:
# Series is at a different precision, aggregate to fit our current set.
series.values = aggregateSeries(method, series.timeStep, timeStep, series.values)
else:
# Normalize current set to fit new series data.
resultValues = aggregateSeries(method, timeStep, series.timeStep, resultValues)
timeStep = series.timeStep
earliestData = series.startTime
rightMissing = (requestUntilTime - series.endTime) // timeStep
rightNulls = [None for i in range(rightMissing)]
resultValues = series.values + rightNulls + resultValues
# this is the right-side boundary on the next iteration
sliceBoundary = slice.startTime
# The end of the requested interval predates all slices
if earliestData is None:
missing = int(untilTime - fromTime) // timeStep
resultValues = [None for i in range(missing)]
# Left pad nulls if the start of the requested interval predates all slices
else:
leftMissing = (earliestData - fromTime) // timeStep
leftNulls = [None for i in range(leftMissing)]
resultValues = leftNulls + resultValues
return TimeSeriesData(fromTime, untilTime, timeStep, resultValues)
def write(self, datapoints):
"""Writes datapoints to underlying slices. Datapoints that round to the same timestamp for the
    node's `timeStep` are treated as duplicates and only the latest value is kept.
:param datapoints: List of datapoint tuples ``[(timestamp, value), ...]``
"""
if self.timeStep is None:
self.readMetadata()
if not datapoints:
return
sequences = self.compact(datapoints)
needsEarlierSlice = [] # keep track of sequences that precede all existing slices
while sequences:
sequence = sequences.pop()
timestamps = [t for t, v in sequence]
beginningTime = timestamps[0]
endingTime = timestamps[-1]
sliceBoundary = None # used to prevent writing sequences across slice boundaries
slicesExist = False
for slice in self.slices:
if slice.timeStep != self.timeStep:
continue
slicesExist = True
# truncate sequence so it doesn't cross the slice boundaries
if beginningTime >= slice.startTime:
if sliceBoundary is None:
sequenceWithinSlice = sequence
else:
# index of highest timestamp that doesn't exceed sliceBoundary
boundaryIndex = bisect_left(timestamps, sliceBoundary)
sequenceWithinSlice = sequence[:boundaryIndex]
try:
slice.write(sequenceWithinSlice)
except SliceGapTooLarge:
newSlice = CeresSlice.create(self, beginningTime, slice.timeStep)
newSlice.write(sequenceWithinSlice)
self.sliceCache = None
except SliceDeleted:
self.sliceCache = None
self.write(datapoints) # recurse to retry
return
sequence = []
break
# sequence straddles the current slice, write the right side
# left side will be taken up in the next slice down
elif endingTime >= slice.startTime:
# index of lowest timestamp that doesn't precede slice.startTime
boundaryIndex = bisect_left(timestamps, slice.startTime)
sequenceWithinSlice = sequence[boundaryIndex:]
# write the leftovers on the next earlier slice
sequence = sequence[:boundaryIndex]
slice.write(sequenceWithinSlice)
if not sequence:
break
sliceBoundary = slice.startTime
else: # slice list exhausted with stuff still to write
needsEarlierSlice.append(sequence)
if not slicesExist:
sequences.append(sequence)
needsEarlierSlice = sequences
break
for sequence in needsEarlierSlice:
slice = CeresSlice.create(self, int(sequence[0][0]), self.timeStep)
slice.write(sequence)
self.clearSliceCache()
def compact(self, datapoints):
"""Compacts datapoints into a list of contiguous, sorted lists of points with duplicate
timestamps and null values removed
:param datapoints: List of datapoint tuples ``[(timestamp, value), ...]``
:returns: A list of lists of contiguous sorted datapoint tuples
``[[(timestamp, value), ...], ...]``
"""
datapoints = sorted(((int(timestamp), float(value))
for timestamp, value in datapoints if value is not None),
key=lambda datapoint: datapoint[0])
sequences = []
sequence = []
minimumTimestamp = 0 # used to avoid duplicate intervals
for timestamp, value in datapoints:
timestamp -= timestamp % self.timeStep # round it down to a proper interval
if not sequence:
sequence.append((timestamp, value))
else:
if timestamp == minimumTimestamp: # overwrite duplicate intervals with latest value
sequence[-1] = (timestamp, value)
continue
if timestamp == sequence[-1][0] + self.timeStep: # append contiguous datapoints
sequence.append((timestamp, value))
else: # start a new sequence if not contiguous
sequences.append(sequence)
sequence = [(timestamp, value)]
minimumTimestamp = timestamp
if sequence:
sequences.append(sequence)
return sequences
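def _example_compact():
  # Illustrative sketch, not part of the library: shows the shape of the
  # sequences produced by CeresNode.compact() for a 60s node -- a duplicate
  # timestamp keeps the latest value and a non-contiguous point starts a new
  # sequence. The temporary tree and metric name are assumptions.
  import tempfile
  tree = CeresTree.createTree(tempfile.mkdtemp())
  node = tree.createNode('example.metric', timeStep=60)
  return node.compact([(0, 1.0), (0, 2.0), (60, 3.0), (300, 4.0)])
  # -> [[(0, 2.0), (60, 3.0)], [(300, 4.0)]]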
class CeresSlice(object):
__slots__ = ('node', 'startTime', 'timeStep', 'fsPath')
def __init__(self, node, startTime, timeStep):
self.node = node
self.startTime = startTime
self.timeStep = timeStep
self.fsPath = join(node.fsPath, '%d@%d.slice' % (startTime, timeStep))
def __repr__(self):
return "<CeresSlice[0x%x]: %s>" % (id(self), self.fsPath)
__str__ = __repr__
@property
def isEmpty(self):
return getsize(self.fsPath) == 0
@property
def endTime(self):
return self.startTime + ((getsize(self.fsPath) // DATAPOINT_SIZE) * self.timeStep)
@property
def mtime(self):
return getmtime(self.fsPath)
@classmethod
def create(cls, node, startTime, timeStep):
slice = cls(node, startTime, timeStep)
fileHandle = open(slice.fsPath, 'wb')
fileHandle.close()
os.chmod(slice.fsPath, SLICE_PERMS)
return slice
def read(self, fromTime, untilTime):
timeOffset = int(fromTime) - self.startTime
if timeOffset < 0:
raise InvalidRequest("requested time range (%d, %d) precedes this slice: %d" % (
fromTime, untilTime, self.startTime))
pointOffset = timeOffset // self.timeStep
byteOffset = pointOffset * DATAPOINT_SIZE
if byteOffset >= getsize(self.fsPath):
raise NoData()
with open(self.fsPath, 'rb') as fileHandle:
fileHandle.seek(byteOffset)
timeRange = int(untilTime - fromTime)
pointRange = timeRange // self.timeStep
byteRange = pointRange * DATAPOINT_SIZE
packedValues = fileHandle.read(byteRange)
pointsReturned = len(packedValues) // DATAPOINT_SIZE
format = '!' + ('d' * pointsReturned)
values = struct.unpack(format, packedValues)
values = [v if not isnan(v) else None for v in values]
endTime = fromTime + (len(values) * self.timeStep)
# print '[DEBUG slice.read] startTime=%s fromTime=%s untilTime=%s' % (
# self.startTime, fromTime, untilTime)
# print '[DEBUG slice.read] timeInfo = (%s, %s, %s)' % (fromTime, endTime, self.timeStep)
# print '[DEBUG slice.read] values = %s' % str(values)
return TimeSeriesData(fromTime, endTime, self.timeStep, values)
def write(self, sequence):
beginningTime = sequence[0][0]
timeOffset = beginningTime - self.startTime
pointOffset = timeOffset // self.timeStep
byteOffset = pointOffset * DATAPOINT_SIZE
values = [v for t, v in sequence]
format = '!' + ('d' * len(values))
packedValues = struct.pack(format, *values)
try:
filesize = getsize(self.fsPath)
except OSError as e:
if e.errno == errno.ENOENT:
raise SliceDeleted()
else:
raise
byteGap = byteOffset - filesize
if byteGap > 0: # pad the allowable gap with nan's
pointGap = byteGap // DATAPOINT_SIZE
if pointGap > MAX_SLICE_GAP:
raise SliceGapTooLarge()
else:
packedGap = PACKED_NAN * pointGap
packedValues = packedGap + packedValues
byteOffset -= byteGap
with open(self.fsPath, 'r+b') as fileHandle:
if LOCK_WRITES:
fcntl.flock(fileHandle.fileno(), fcntl.LOCK_EX)
try:
fileHandle.seek(byteOffset)
except IOError:
# print " IOError: fsPath=%s byteOffset=%d size=%d sequence=%s" % (
# self.fsPath, byteOffset, filesize, sequence)
raise
fileHandle.write(packedValues)
def deleteBefore(self, t):
if not exists(self.fsPath):
raise SliceDeleted()
if t % self.timeStep != 0:
t = t - (t % self.timeStep) + self.timeStep
timeOffset = t - self.startTime
if timeOffset < 0:
return
pointOffset = timeOffset // self.timeStep
byteOffset = pointOffset * DATAPOINT_SIZE
if not byteOffset:
return
self.node.clearSliceCache()
with open(self.fsPath, 'r+b') as fileHandle:
if LOCK_WRITES:
fcntl.flock(fileHandle.fileno(), fcntl.LOCK_EX)
fileHandle.seek(byteOffset)
fileData = fileHandle.read()
if fileData:
fileHandle.seek(0)
fileHandle.write(fileData)
fileHandle.truncate()
fileHandle.close()
newFsPath = join(dirname(self.fsPath), "%d@%d.slice" % (t, self.timeStep))
os.rename(self.fsPath, newFsPath)
else:
os.unlink(self.fsPath)
raise SliceDeleted()
def __lt__(self, other):
return self.startTime < other.startTime
class TimeSeriesData(object):
__slots__ = ('startTime', 'endTime', 'timeStep', 'values')
def __init__(self, startTime, endTime, timeStep, values):
self.startTime = startTime
self.endTime = endTime
self.timeStep = timeStep
self.values = values
@property
def timestamps(self):
return range(self.startTime, self.endTime, self.timeStep)
def __iter__(self):
return izip(self.timestamps, self.values)
def __len__(self):
return len(self.values)
def merge(self, other):
for timestamp, value in other:
if value is None:
continue
timestamp -= timestamp % self.timeStep
if timestamp < self.startTime:
continue
index = int((timestamp - self.startTime) // self.timeStep)
try:
if self.values[index] is None:
self.values[index] = value
except IndexError:
continue
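# --- Illustrative sketch (added commentary, not part of the original module) ---
# A hedged, minimal demonstration of merge(): values already present in the
# target series win, datapoints from the other series are snapped down to the
# target's timeStep, and only None gaps are filled. Nothing calls this helper.
def _example_merge():  # pragma: no cover
  base = TimeSeriesData(startTime=60, endTime=120, timeStep=10,
                        values=[1.0, None, 3.0, None, 5.0, None])
  other = TimeSeriesData(startTime=60, endTime=120, timeStep=10,
                         values=[9.0, 2.0, 9.0, 4.0, 9.0, None])
  base.merge(other)
  return base.values  # -> [1.0, 2.0, 3.0, 4.0, 5.0, None]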
class CorruptNode(Exception):
def __init__(self, node, problem):
Exception.__init__(self, problem)
self.node = node
self.problem = problem
class NoData(Exception):
pass
class NodeNotFound(Exception):
pass
class NodeDeleted(Exception):
pass
class InvalidRequest(Exception):
pass
class InvalidAggregationMethod(Exception):
pass
class SliceGapTooLarge(Exception):
"For internal use only"
class SliceDeleted(Exception):
pass
def aggregate(aggregationMethod, values):
# Filter out None values
knownValues = list(filter(lambda x: x is not None, values))
  if len(knownValues) == 0:
return None
# Aggregate based on method
if aggregationMethod == 'average':
return float(sum(knownValues)) / float(len(knownValues))
elif aggregationMethod == 'sum':
return float(sum(knownValues))
elif aggregationMethod == 'last':
return knownValues[-1]
elif aggregationMethod == 'max':
return max(knownValues)
elif aggregationMethod == 'min':
return min(knownValues)
else:
raise InvalidAggregationMethod("Unrecognized aggregation method %s" %
aggregationMethod)
def aggregateSeries(method, oldTimeStep, newTimeStep, values):
# Aggregate current values to fit newTimeStep.
# Makes the assumption that the caller has already guaranteed
# that newTimeStep is bigger than oldTimeStep.
factor = int(newTimeStep // oldTimeStep)
newValues = []
subArr = []
for val in values:
subArr.append(val)
if len(subArr) == factor:
newValues.append(aggregate(method, subArr))
subArr = []
if len(subArr):
newValues.append(aggregate(method, subArr))
return newValues
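# --- Illustrative sketch (added commentary, not part of the original module) ---
# A hedged example of aggregate()/aggregateSeries(): downsampling a 60s series
# to 180s buckets groups values in threes (factor = 180 // 60), and None
# datapoints are ignored inside each bucket. Nothing calls this helper.
def _example_aggregate_series():  # pragma: no cover
  values = [1.0, 2.0, 3.0, None, 5.0, 7.0, 4.0]
  # buckets: [1, 2, 3] -> 2.0, [None, 5, 7] -> 6.0, trailing [4] -> 4.0
  return aggregateSeries('average', 60, 180, values)  # -> [2.0, 6.0, 4.0]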
def getTree(path):
while path not in (os.sep, ''):
if isdir(join(path, '.ceres-tree')):
return CeresTree(path)
path = dirname(path)
def setDefaultNodeCachingBehavior(behavior):
global DEFAULT_NODE_CACHING_BEHAVIOR
behavior = behavior.lower()
if behavior not in ('none', 'all'):
raise ValueError("invalid caching behavior '%s'" % behavior)
DEFAULT_NODE_CACHING_BEHAVIOR = behavior
def setDefaultSliceCachingBehavior(behavior):
global DEFAULT_SLICE_CACHING_BEHAVIOR
behavior = behavior.lower()
if behavior not in ('none', 'all', 'latest'):
raise ValueError("invalid caching behavior '%s'" % behavior)
DEFAULT_SLICE_CACHING_BEHAVIOR = behavior
| graphite-project/ceres | ceres.py | Python | apache-2.0 | 31,634 |
# -*- coding: utf-8 -*-
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Generated code. DO NOT EDIT!
#
# Snippet for DeleteRegistration
# NOTE: This snippet has been automatically generated for illustrative purposes only.
# It may require modifications to work in your environment.
# To install the latest published package dependency, execute the following:
# python3 -m pip install google-cloud-domains
# [START domains_v1_generated_Domains_DeleteRegistration_sync]
from google.cloud import domains_v1
def sample_delete_registration():
# Create a client
client = domains_v1.DomainsClient()
# Initialize request argument(s)
request = domains_v1.DeleteRegistrationRequest(
name="name_value",
)
# Make the request
operation = client.delete_registration(request=request)
print("Waiting for operation to complete...")
response = operation.result()
# Handle the response
print(response)
# [END domains_v1_generated_Domains_DeleteRegistration_sync]
| googleapis/python-domains | samples/generated_samples/domains_v1_generated_domains_delete_registration_sync.py | Python | apache-2.0 | 1,539 |
from Bio import SeqIO
import sys, string
fasta_file = "/Users/saljh8/GitHub/altanalyze/AltDatabase/EnsMart72/Hs/SequenceData/Homo_sapiens.GRCh37.72.cdna.all.fa" # Input fasta file
result_file = "/Users/saljh8/GitHub/altanalyze/AltDatabase/EnsMart72/Hs/SequenceData/Homo_sapiens.GRCh37.72.cdna.all.filtered.fa" # Output fasta file
fasta_sequences = SeqIO.parse(open(fasta_file),'fasta')
with open(result_file, "w") as f:
for seq in fasta_sequences:
        # The 4th ':'-separated field of the record description is the chromosome
        # name; keep only records with a numeric chromosome (this drops
        # haplotype/patch scaffolds).
        chromosome = seq.description.split(':')[3]
        try:
            float(chromosome)
        except ValueError:
            continue
        SeqIO.write([seq], f, "fasta") | nsalomonis/AltAnalyze | import_scripts/filterFASTA.py | Python | apache-2.0 | 619
def getitem(v,d):
"Returns the value of entry d in v"
assert d in v.D
return v.f[d] if d in v.f else 0
def setitem(v,d,val):
"Set the element of v with label d to be val"
assert d in v.D
v.f[d] = val
def equal(u,v):
"Returns true iff u is equal to v"
assert u.D == v.D
union = set(u.f) | set (v.f)
for k in union:
uval = u.f[k] if k in u.f else 0
vval = v.f[k] if k in v.f else 0
if uval != vval:
return False
return True
def add(u,v):
"Returns the sum of the two vectors"
assert u.D == v.D
ukeys = set(u.f)
vkeys = set (v.f)
both = ukeys & vkeys
uonly = ukeys - both
vonly = vkeys - both
f = {}
for k in both:
f[k] = u.f[k] + v.f[k]
for k in uonly:
f[k] = u.f[k]
for k in vonly:
f[k] = v.f[k]
return Vec (u.D | v.D, f)
def dot(u,v):
"Returns the dot product of the two vectors"
assert u.D == v.D
ukeys = set(u.f)
vkeys = set (v.f)
both = ukeys & vkeys
return sum([u.f[k] * v.f[k] for k in both])
def scalar_mul(v, alpha):
"Returns the scalar-vector product alpha times v"
f = {k: alpha * v.f[k] for k in v.f}
return (Vec(v.D, f))
def neg(v):
"Returns the negation of a vector"
return scalar_mul (v, -1)
def toStr(v):
"pretty-printing"
try:
D_list = sorted(v.D)
except TypeError:
D_list = sorted(v.D, key=hash)
numdec = 3
    wd = dict([(k, 1 + max(len(str(k)), len('{0:.{1}G}'.format(v[k], numdec))))
               if isinstance(v[k], int) or isinstance(v[k], float)
               else (k, 1 + max(len(str(k)), len(str(v[k]))))
               for k in D_list])
# w = 1+max([len(str(k)) for k in D_list]+[len('{0:.{1}G}'.format(value,numdec)) for value in v.f.values()])
s1 = ''.join(['{0:>{1}}'.format(k,wd[k]) for k in D_list])
    s2 = ''.join(['{0:>{1}.{2}G}'.format(v[k], wd[k], numdec)
                  if isinstance(v[k], int) or isinstance(v[k], float)
                  else '{0:>{1}}'.format(v[k], wd[k])
                  for k in D_list])
return "\n" + s1 + "\n" + '-'*sum(wd.values()) +"\n" + s2
##### NO NEED TO MODIFY BELOW HERE #####
class Vec:
"""
A vector has two fields:
D - the domain (a set)
f - a dictionary mapping (some) domain elements to field elements
elements of D not appearing in f are implicitly mapped to zero
"""
def __init__(self, labels, function):
self.D = labels
self.f = function
__getitem__ = getitem
__setitem__ = setitem
__neg__ = neg
__rmul__ = scalar_mul #if left arg of * is primitive, assume it's a scalar
def __mul__(self,other):
#If other is a vector, returns the dot product of self and other
if isinstance(other, Vec):
return dot(self,other)
else:
return NotImplemented # Will cause other.__rmul__(self) to be invoked
def __truediv__(self,other): # Scalar division
return (1/other)*self
__add__ = add
def __radd__(self, other):
"Hack to allow sum(...) to work with vectors"
if other == 0:
return self
# def __sub__(self, a,b):
# "Returns a vector which is the difference of a and b."
# return a+(-b)
def __sub__(self, other):
"Returns a vector which is the difference of a and b."
return self+(-other)
__eq__ = equal
__str__ = toStr
def __repr__(self):
return "Vec(" + str(self.D) + "," + str(self.f) + ")"
def copy(self):
"Don't make a new copy of the domain D"
return Vec(self.D, self.f.copy())
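# --- Illustrative sketch (added commentary, not part of the original file) ---
# A hedged example of the sparse representation: labels absent from the
# function dictionary f are implicitly zero, so the operations above only touch
# the entries that are actually stored. This helper is not used elsewhere.
def _example_vec_usage():
    D = {'a', 'b', 'c'}
    u = Vec(D, {'a': 1, 'b': 2})            # 'c' is implicitly 0
    v = Vec(D, {'b': 3, 'c': 4})
    total = u + v                           # Vec(D, {'a': 1, 'b': 5, 'c': 4})
    return total['c'], u * v, (2 * u)['b']  # -> (4, 6, 4)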
| tri2sing/LinearAlgebraPython | vec.py | Python | apache-2.0 | 3,573 |
# vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Copyright 2012 New Dream Network, LLC (DreamHost)
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# @author: Mark McClain, DreamHost
import os
from oslo.config import cfg
from quantum.agent.linux import ip_lib
from quantum.agent.linux import utils
from quantum.openstack.common import log as logging
LOG = logging.getLogger(__name__)
OPTS = [
cfg.StrOpt('external_pids',
default='$state_path/external/pids',
help=_('Location to store child pid files')),
]
cfg.CONF.register_opts(OPTS)
class ProcessManager(object):
"""An external process manager for Quantum spawned processes.
Note: The manager expects uuid to be in cmdline.
"""
def __init__(self, conf, uuid, root_helper='sudo', namespace=None):
self.conf = conf
self.uuid = uuid
self.root_helper = root_helper
self.namespace = namespace
def enable(self, cmd_callback):
if not self.active:
cmd = cmd_callback(self.get_pid_file_name(ensure_pids_dir=True))
if self.namespace:
ip_wrapper = ip_lib.IPWrapper(self.root_helper, self.namespace)
ip_wrapper.netns.execute(cmd)
else:
# For normal sudo prepend the env vars before command
utils.execute(cmd, self.root_helper)
def disable(self):
pid = self.pid
if self.active:
cmd = ['kill', '-9', pid]
if self.namespace:
ip_wrapper = ip_lib.IPWrapper(self.root_helper, self.namespace)
ip_wrapper.netns.execute(cmd)
else:
utils.execute(cmd, self.root_helper)
elif pid:
LOG.debug(_('Process for %(uuid)s pid %(pid)d is stale, ignoring '
'command'), {'uuid': self.uuid, 'pid': pid})
else:
LOG.debug(_('No process started for %s'), self.uuid)
def get_pid_file_name(self, ensure_pids_dir=False):
"""Returns the file name for a given kind of config file."""
pids_dir = os.path.abspath(os.path.normpath(self.conf.external_pids))
if ensure_pids_dir and not os.path.isdir(pids_dir):
os.makedirs(pids_dir, 0755)
return os.path.join(pids_dir, self.uuid + '.pid')
@property
def pid(self):
"""Last known pid for this external process spawned for this uuid."""
file_name = self.get_pid_file_name()
msg = _('Error while reading %s')
try:
with open(file_name, 'r') as f:
return int(f.read())
except IOError, e:
msg = _('Unable to access %s')
except ValueError, e:
msg = _('Unable to convert value in %s')
LOG.debug(msg, file_name)
return None
@property
def active(self):
pid = self.pid
if pid is None:
return False
cmd = ['cat', '/proc/%s/cmdline' % pid]
try:
return self.uuid in utils.execute(cmd, self.root_helper)
except RuntimeError, e:
return False
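# --- Illustrative sketch (added commentary, not part of the original module) ---
# A hedged example of driving ProcessManager: enable() hands the callback the
# pid-file path and expects the full command to spawn in return. The
# dnsmasq-style command and the namespace naming below are illustrative
# assumptions only; nothing imports or calls this helper.
def _example_process_manager(conf, router_id):  # pragma: no cover
    def build_cmd(pid_file):
        return ['dnsmasq', '--no-hosts', '--conf-file=',
                '--pid-file=%s' % pid_file]
    pm = ProcessManager(conf, router_id, root_helper='sudo',
                        namespace='qrouter-%s' % router_id)
    pm.enable(build_cmd)
    return pm.active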
| wallnerryan/quantum_migrate | quantum/agent/linux/external_process.py | Python | apache-2.0 | 3,644 |
# -*- coding: utf-8 -*-
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from typing import (
Any,
AsyncIterator,
Awaitable,
Callable,
Sequence,
Tuple,
Optional,
Iterator,
)
from google.cloud.compute_v1.types import compute
class ListPager:
"""A pager for iterating through ``list`` requests.
This class thinly wraps an initial
:class:`google.cloud.compute_v1.types.PublicDelegatedPrefixList` object, and
provides an ``__iter__`` method to iterate through its
``items`` field.
If there are more pages, the ``__iter__`` method will make additional
``List`` requests and continue to iterate
through the ``items`` field on the
corresponding responses.
All the usual :class:`google.cloud.compute_v1.types.PublicDelegatedPrefixList`
attributes are available on the pager. If multiple requests are made, only
the most recent response is retained, and thus used for attribute lookup.
"""
def __init__(
self,
method: Callable[..., compute.PublicDelegatedPrefixList],
request: compute.ListGlobalPublicDelegatedPrefixesRequest,
response: compute.PublicDelegatedPrefixList,
*,
metadata: Sequence[Tuple[str, str]] = ()
):
"""Instantiate the pager.
Args:
method (Callable): The method that was originally called, and
which instantiated this pager.
request (google.cloud.compute_v1.types.ListGlobalPublicDelegatedPrefixesRequest):
The initial request object.
response (google.cloud.compute_v1.types.PublicDelegatedPrefixList):
The initial response object.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
"""
self._method = method
self._request = compute.ListGlobalPublicDelegatedPrefixesRequest(request)
self._response = response
self._metadata = metadata
def __getattr__(self, name: str) -> Any:
return getattr(self._response, name)
@property
def pages(self) -> Iterator[compute.PublicDelegatedPrefixList]:
yield self._response
while self._response.next_page_token:
self._request.page_token = self._response.next_page_token
self._response = self._method(self._request, metadata=self._metadata)
yield self._response
def __iter__(self) -> Iterator[compute.PublicDelegatedPrefix]:
for page in self.pages:
yield from page.items
def __repr__(self) -> str:
return "{0}<{1!r}>".format(self.__class__.__name__, self._response)
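# --- Illustrative sketch (added commentary, not part of the generated module) ---
# A hedged example of consuming a ListPager returned by the generated client's
# ``list`` call: iterating the pager yields PublicDelegatedPrefix resources and
# transparently fetches follow-up pages. The ``name`` attribute is assumed here
# purely for illustration; nothing calls this helper.
def _example_consume_pager(pager: "ListPager"):
    return [prefix.name for prefix in pager]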
| googleapis/python-compute | google/cloud/compute_v1/services/global_public_delegated_prefixes/pagers.py | Python | apache-2.0 | 3,216 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""Tests for the Microsoft Office MRUs Windows Registry plugin."""
import unittest
from plaso.formatters import officemru # pylint: disable=unused-import
from plaso.formatters import winreg # pylint: disable=unused-import
from plaso.lib import eventdata
from plaso.lib import timelib
from plaso.parsers.winreg_plugins import officemru
from tests import test_lib as shared_test_lib
from tests.parsers.winreg_plugins import test_lib
__author__ = 'David Nides ([email protected])'
class OfficeMRUPluginTest(test_lib.RegistryPluginTestCase):
"""Tests for the Microsoft Office MRUs Windows Registry plugin."""
@shared_test_lib.skipUnlessHasTestFile([u'NTUSER-WIN7.DAT'])
def testProcess(self):
"""Tests the Process function."""
test_file_entry = self._GetTestFileEntry([u'NTUSER-WIN7.DAT'])
key_path = (
u'HKEY_CURRENT_USER\\Software\\Microsoft\\Office\\14.0\\Word\\'
u'File MRU')
win_registry = self._GetWinRegistryFromFileEntry(test_file_entry)
registry_key = win_registry.GetKeyByPath(key_path)
plugin_object = officemru.OfficeMRUPlugin()
storage_writer = self._ParseKeyWithPlugin(
registry_key, plugin_object, file_entry=test_file_entry)
self.assertEqual(len(storage_writer.events), 6)
event_object = storage_writer.events[5]
self.assertEqual(event_object.pathspec, test_file_entry.path_spec)
# This should just be the plugin name, as we're invoking it directly,
# and not through the parser.
self.assertEqual(event_object.parser, plugin_object.plugin_name)
expected_timestamp = timelib.Timestamp.CopyFromString(
u'2012-03-13 18:27:15.089802')
self.assertEqual(event_object.timestamp, expected_timestamp)
self.assertEqual(
event_object.timestamp_desc, eventdata.EventTimestamp.WRITTEN_TIME)
regvalue_identifier = u'Item 1'
expected_value_string = (
u'[F00000000][T01CD0146EA1EADB0][O00000000]*'
u'C:\\Users\\nfury\\Documents\\StarFury\\StarFury\\'
u'SA-23E Mitchell-Hyundyne Starfury.docx')
self._TestRegvalue(event_object, regvalue_identifier, expected_value_string)
expected_message = (
u'[{0:s}] '
u'{1:s}: {2:s} '
u'Item 2: [F00000000][T01CD00921FC127F0][O00000000]*'
u'C:\\Users\\nfury\\Documents\\StarFury\\StarFury\\Earthforce SA-26 '
u'Thunderbolt Star Fury.docx '
u'Item 3: [F00000000][T01CD009208780140][O00000000]*'
u'C:\\Users\\nfury\\Documents\\StarFury\\StarFury\\StarFury.docx '
u'Item 4: [F00000000][T01CCFE0B22DA9EF0][O00000000]*'
u'C:\\Users\\nfury\\Documents\\VIBRANIUM.docx '
u'Item 5: [F00000000][T01CCFCBA595DFC30][O00000000]*'
u'C:\\Users\\nfury\\Documents\\ADAMANTIUM-Background.docx').format(
key_path, regvalue_identifier, expected_value_string)
expected_short_message = u'{0:s}...'.format(expected_message[0:77])
self._TestGetMessageStrings(
event_object, expected_message, expected_short_message)
# Test OfficeMRUWindowsRegistryEvent.
event_object = storage_writer.events[0]
expected_timestamp = timelib.Timestamp.CopyFromString(
u'2012-03-13 18:27:15.083')
self.assertEqual(event_object.timestamp, expected_timestamp)
self.assertEqual(
event_object.timestamp_desc, eventdata.EventTimestamp.WRITTEN_TIME)
self.assertEqual(event_object.value_string, expected_value_string)
expected_message = u'[{0:s}] Value: {1:s}'.format(
key_path, expected_value_string)
expected_short_message = u'{0:s}...'.format(expected_value_string[0:77])
self._TestGetMessageStrings(
event_object, expected_message, expected_short_message)
if __name__ == '__main__':
unittest.main()
| dc3-plaso/plaso | tests/parsers/winreg_plugins/officemru.py | Python | apache-2.0 | 3,793 |
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2013, Mirantis Inc
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# @author: Tatiana Mazur
from mox import IsA # noqa
from django.core.urlresolvers import reverse # noqa
from django.core.urlresolvers import reverse_lazy # noqa
from django import http
from horizon.workflows import views
from openstack_dashboard import api
from openstack_dashboard.test import helpers as test
from openstack_dashboard.dashboards.project.vpn import workflows
class VPNTests(test.TestCase):
class AttributeDict(dict):
def __getattr__(self, attr):
return self[attr]
def __setattr__(self, attr, value):
self[attr] = value
DASHBOARD = 'project'
INDEX_URL = reverse_lazy('horizon:%s:vpn:index' % DASHBOARD)
ADDIKEPOLICY_PATH = 'horizon:%s:vpn:addikepolicy' % DASHBOARD
ADDIPSECPOLICY_PATH = 'horizon:%s:vpn:addipsecpolicy' % DASHBOARD
ADDVPNSERVICE_PATH = 'horizon:%s:vpn:addvpnservice' % DASHBOARD
ADDVPNCONNECTION_PATH = 'horizon:%s:vpn:addipsecsiteconnection' % DASHBOARD
IKEPOLICY_DETAIL_PATH = 'horizon:%s:vpn:ikepolicydetails' % DASHBOARD
IPSECPOLICY_DETAIL_PATH = 'horizon:%s:vpn:ipsecpolicydetails' % DASHBOARD
VPNSERVICE_DETAIL_PATH = 'horizon:%s:vpn:vpnservicedetails' % DASHBOARD
VPNCONNECTION_DETAIL_PATH = 'horizon:%s:vpn:ipsecsiteconnectiondetails' %\
DASHBOARD
def set_up_expect(self):
# retrieves vpnservices
vpnservice1, vpnservice2 = self.vpnservices.list()[:2]
api.vpn.vpnservices_get(
IsA(http.HttpRequest)).AndReturn(self.vpnservices.list())
api.vpn.vpnservice_get(
IsA(http.HttpRequest), vpnservice1.id).AndReturn(vpnservice1)
api.vpn.vpnservice_get(
IsA(http.HttpRequest), vpnservice2.id).AndReturn(vpnservice2)
# retrieves ikepolicies
api.vpn.ikepolicies_get(
IsA(http.HttpRequest)).AndReturn(self.ikepolicies.list())
ikepolicy1, ikepolicy2 = self.ikepolicies.list()[:2]
api.vpn.ikepolicy_get(
IsA(http.HttpRequest), ikepolicy1.id).AndReturn(ikepolicy1)
api.vpn.ikepolicy_get(
IsA(http.HttpRequest), ikepolicy2.id).AndReturn(ikepolicy2)
# retrieves ipsecpolicies
api.vpn.ipsecpolicies_get(
IsA(http.HttpRequest)).AndReturn(self.ipsecpolicies.list())
ipsecpolicy1, ipsecpolicy2 = self.ipsecpolicies.list()[:2]
api.vpn.ipsecpolicy_get(
IsA(http.HttpRequest), ipsecpolicy1.id).AndReturn(ipsecpolicy1)
api.vpn.ipsecpolicy_get(
IsA(http.HttpRequest), ipsecpolicy2.id).AndReturn(ipsecpolicy2)
# retrieves ipsecsiteconnections
api.vpn.ipsecsiteconnections_get(
IsA(http.HttpRequest)).AndReturn(self.ipsecsiteconnections.list())
def set_up_expect_with_exception(self):
api.vpn.vpnservices_get(
IsA(http.HttpRequest)).AndRaise(self.exceptions.neutron)
api.vpn.ikepolicies_get(
IsA(http.HttpRequest)).AndRaise(self.exceptions.neutron)
api.vpn.ipsecpolicies_get(
IsA(http.HttpRequest)).AndRaise(self.exceptions.neutron)
api.vpn.ipsecsiteconnections_get(
IsA(http.HttpRequest)).AndRaise(self.exceptions.neutron)
@test.create_stubs({api.vpn: ('ikepolicies_get', 'ipsecpolicies_get',
'vpnservices_get',
'ipsecsiteconnections_get', 'ikepolicy_get',
'ipsecpolicy_get', 'vpnservice_get',
'ipsecsiteconnection_get')})
def test_index_vpnservices(self):
self.set_up_expect()
self.mox.ReplayAll()
res = self.client.get(self.INDEX_URL)
self.assertTemplateUsed(res, '%s/vpn/index.html'
% self.DASHBOARD)
self.assertTemplateUsed(res, 'horizon/common/_detail_table.html')
self.assertEqual(len(res.context['table'].data),
len(self.vpnservices.list()))
@test.create_stubs({api.vpn: ('ikepolicies_get', 'ipsecpolicies_get',
'vpnservices_get',
'ipsecsiteconnections_get', 'ikepolicy_get',
'ipsecpolicy_get', 'vpnservice_get',
'ipsecsiteconnection_get')})
def test_index_ikepolicies(self):
self.set_up_expect()
self.mox.ReplayAll()
res = self.client.get(self.INDEX_URL + '?tab=vpntabs__ikepolicies')
self.assertTemplateUsed(res, '%s/vpn/index.html'
% self.DASHBOARD)
self.assertTemplateUsed(res, 'horizon/common/_detail_table.html')
self.assertEqual(len(res.context['ikepoliciestable_table'].data),
len(self.ikepolicies.list()))
@test.create_stubs({api.vpn: ('ikepolicies_get', 'ipsecpolicies_get',
'vpnservices_get',
'ipsecsiteconnections_get', 'ikepolicy_get',
'ipsecpolicy_get', 'vpnservice_get',
'ipsecsiteconnection_get')})
def test_index_ipsecpolicies(self):
self.set_up_expect()
self.mox.ReplayAll()
res = self.client.get(self.INDEX_URL + '?tab=vpntabs__ipsecpolicies')
self.assertTemplateUsed(res, '%s/vpn/index.html'
% self.DASHBOARD)
self.assertTemplateUsed(res, 'horizon/common/_detail_table.html')
self.assertEqual(len(res.context['ipsecpoliciestable_table'].data),
len(self.ipsecpolicies.list()))
@test.create_stubs({api.vpn: ('ikepolicies_get', 'ipsecpolicies_get',
'vpnservices_get',
'ipsecsiteconnections_get', 'ikepolicy_get',
'ipsecpolicy_get', 'vpnservice_get',
'ipsecsiteconnection_get')})
def test_index_ipsecsiteconnections(self):
self.set_up_expect()
self.mox.ReplayAll()
res = self.client.get(
self.INDEX_URL + '?tab=vpntabs__ipsecsiteconnections')
self.assertTemplateUsed(res, '%s/vpn/index.html'
% self.DASHBOARD)
self.assertTemplateUsed(res, 'horizon/common/_detail_table.html')
self.assertEqual(
len(res.context['ipsecsiteconnectionstable_table'].data),
len(self.ipsecsiteconnections.list()))
@test.create_stubs({api.vpn: ('ikepolicies_get', 'ipsecpolicies_get',
'vpnservices_get',
'ipsecsiteconnections_get')})
def test_index_exception_vpnservices(self):
self.set_up_expect_with_exception()
self.mox.ReplayAll()
res = self.client.get(self.INDEX_URL)
self.assertTemplateUsed(res, '%s/vpn/index.html'
% self.DASHBOARD)
self.assertTemplateUsed(res,
'horizon/common/_detail_table.html')
self.assertEqual(len(res.context['table'].data), 0)
@test.create_stubs({api.vpn: ('ikepolicies_get', 'ipsecpolicies_get',
'vpnservices_get',
'ipsecsiteconnections_get')})
def test_index_exception_ikepolicies(self):
self.set_up_expect_with_exception()
self.mox.ReplayAll()
res = self.client.get(self.INDEX_URL + '?tab=vpntabs__ikepolicies')
self.assertTemplateUsed(res, '%s/vpn/index.html'
% self.DASHBOARD)
self.assertTemplateUsed(res,
'horizon/common/_detail_table.html')
self.assertEqual(len(res.context['table'].data), 0)
@test.create_stubs({api.vpn: ('ikepolicies_get', 'ipsecpolicies_get',
'vpnservices_get',
'ipsecsiteconnections_get')})
def test_index_exception_ipsecpolicies(self):
self.set_up_expect_with_exception()
self.mox.ReplayAll()
res = self.client.get(self.INDEX_URL + '?tab=vpntabs__ipsecpolicies')
self.assertTemplateUsed(res, '%s/vpn/index.html'
% self.DASHBOARD)
self.assertTemplateUsed(res,
'horizon/common/_detail_table.html')
self.assertEqual(len(res.context['table'].data), 0)
@test.create_stubs({api.vpn: ('ikepolicies_get', 'ipsecpolicies_get',
'vpnservices_get',
'ipsecsiteconnections_get')})
def test_index_exception_ipsecsiteconnections(self):
self.set_up_expect_with_exception()
self.mox.ReplayAll()
res = self.client.get(
self.INDEX_URL + '?tab=vpntabs__ipsecsiteconnections')
self.assertTemplateUsed(res, '%s/vpn/index.html'
% self.DASHBOARD)
self.assertTemplateUsed(res,
'horizon/common/_detail_table.html')
self.assertEqual(len(res.context['table'].data), 0)
@test.create_stubs({api.neutron: ('network_list_for_tenant',
'router_list')})
def test_add_vpnservice_get(self):
subnet = self.subnets.first()
networks = [{'subnets': [subnet, ]}, ]
api.neutron.network_list_for_tenant(
IsA(http.HttpRequest), subnet.tenant_id).AndReturn(networks)
routers = self.routers.list()
api.neutron.router_list(
IsA(http.HttpRequest)).AndReturn(routers)
self.mox.ReplayAll()
res = self.client.get(reverse(self.ADDVPNSERVICE_PATH))
workflow = res.context['workflow']
self.assertTemplateUsed(res, views.WorkflowView.template_name)
self.assertEqual(workflow.name, workflows.AddVPNService.name)
expected_objs = ['<AddVPNServiceStep: addvpnserviceaction>', ]
self.assertQuerysetEqual(workflow.steps, expected_objs)
def test_add_ikepolicy_get(self):
res = self.client.get(reverse(self.ADDIKEPOLICY_PATH))
workflow = res.context['workflow']
self.assertTemplateUsed(res, views.WorkflowView.template_name)
self.assertEqual(workflow.name, workflows.AddIKEPolicy.name)
expected_objs = ['<AddIKEPolicyStep: addikepolicyaction>', ]
self.assertQuerysetEqual(workflow.steps, expected_objs)
def test_add_ipsecpolicy_get(self):
res = self.client.get(reverse(self.ADDIPSECPOLICY_PATH))
workflow = res.context['workflow']
self.assertTemplateUsed(res, views.WorkflowView.template_name)
self.assertEqual(workflow.name, workflows.AddIPSecPolicy.name)
expected_objs = ['<AddIPSecPolicyStep: addipsecpolicyaction>', ]
self.assertQuerysetEqual(workflow.steps, expected_objs)
@test.create_stubs({api.vpn: ('ikepolicies_get', 'ipsecpolicies_get',
'vpnservices_get')})
def test_add_ipsecsiteconnection_get(self):
ikepolicies = self.ikepolicies.list()
api.vpn.ikepolicies_get(
IsA(http.HttpRequest)).AndReturn(ikepolicies)
ipsecpolicies = self.ipsecpolicies.list()
api.vpn.ipsecpolicies_get(
IsA(http.HttpRequest)).AndReturn(ipsecpolicies)
vpnservices = self.vpnservices.list()
api.vpn.vpnservices_get(
IsA(http.HttpRequest)).AndReturn(vpnservices)
self.mox.ReplayAll()
res = self.client.get(reverse(self.ADDVPNCONNECTION_PATH))
workflow = res.context['workflow']
self.assertTemplateUsed(res, views.WorkflowView.template_name)
self.assertEqual(workflow.name, workflows.AddIPSecSiteConnection.name)
expected_objs = ['<AddIPSecSiteConnectionStep: '
'addipsecsiteconnectionaction>',
'<AddIPSecSiteConnectionOptionalStep: '
'addipsecsiteconnectionoptionalaction>', ]
self.assertQuerysetEqual(workflow.steps, expected_objs)
| r-icarus/openstack_microserver | openstack_dashboard/dashboards/project/vpn/tests.py | Python | apache-2.0 | 12,792 |
# Copyright 2014 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Test classes for code snippet for modeling article."""
from appengine.ndb.modeling import relation_model_models as models
from google.appengine.ext import ndb
from tests import AppEngineTestbedCase
class ContactTestCase(AppEngineTestbedCase):
"""A test case for the Contact model with relationship model."""
    def setUp(self):
        """Creates 1 contact and 2 companies.
Assuming the contact belongs to tmatsuo's addressbook.
"""
super(ContactTestCase, self).setUp()
self.myaddressbook_key = ndb.Key('AddressBook', 'tmatsuo')
mary = models.Contact(parent=self.myaddressbook_key, name='Mary')
mary.put()
self.mary_key = mary.key
google = models.Company(name='Google')
google.put()
self.google_key = google.key
candit = models.Company(name='Candit')
candit.put()
self.candit_key = candit.key
def test_relationship(self):
"""Two companies hire Mary."""
mary = self.mary_key.get()
google = self.google_key.get()
candit = self.candit_key.get()
# first google hires Mary
models.ContactCompany(parent=self.myaddressbook_key,
contact=mary.key,
company=google.key,
title='engineer').put()
# then another company named 'candit' hires Mary too
models.ContactCompany(parent=self.myaddressbook_key,
contact=mary.key,
company=candit.key,
title='president').put()
# get the list of companies that Mary belongs to
self.assertEqual(len(mary.companies), 2)
| raybuhr/python-docs-samples | appengine/ndb/modeling/tests/test_relation_model_models.py | Python | apache-2.0 | 2,319 |
import os.path
from uuid import uuid4
import shutil
import logging
logger = logging.getLogger(__name__)
_MARKER = object()
class FileUploadTempStore(object):
session_storage_slug = 'websauna.tempstore'
def __init__(self, request):
self.tempdir = request.registry.settings['websauna.uploads_tempdir']
        # os.makedirs() returns None, so the original condition could never log;
        # check for the directory explicitly and warn only when creating it.
        if not os.path.isdir(self.tempdir):
            logger.warning("Creating dir: '%s'", self.tempdir)
        os.makedirs(self.tempdir, mode=0o777, exist_ok=True)
self.request = request
self.session = request.session
def preview_url(self, _uid):
# pylint: disable=no-self-use
return None
def __contains__(self, name):
return name in self.session.get(self.session_storage_slug, {})
def __setitem__(self, name, data):
newdata = data.copy()
stream = newdata.pop('fp', None)
if stream is not None:
newdata['randid'] = uuid4().hex
file_name = os.path.join(self.tempdir, newdata['randid'])
            # Use a context manager so the copied upload is flushed and closed.
            with open(file_name, 'wb') as output:
                shutil.copyfileobj(stream, output)
self._tempstore_set(name, newdata)
def _tempstore_set(self, name, data):
# cope with sessioning implementations that cant deal with
# in-place mutation of mutable values (temporarily?)
existing = self.session.get(self.session_storage_slug, {})
existing[name] = data
self.session[self.session_storage_slug] = existing
def clear(self):
        # Pop the same session key this store writes to and iterate over the
        # stored entries (values), not over (key, value) tuples.
        data = self.session.pop(self.session_storage_slug, {})
        for cookie in data.values():
            randid = cookie.get('randid')
            if randid is None:
                continue
            file_name = os.path.join(self.tempdir, randid)
try:
os.remove(file_name)
except OSError:
pass
def get(self, name, default=None):
data = self.session.get(self.session_storage_slug, {}).get(name)
if data is None:
return default
newdata = data.copy()
randid = newdata.get('randid')
if randid is not None:
file_name = os.path.join(self.tempdir, randid)
try:
newdata['fp'] = open(file_name, 'rb')
except IOError:
pass
return newdata
def __getitem__(self, name):
data = self.get(name, _MARKER)
if data is _MARKER:
raise KeyError(name)
return data
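# --- Illustrative sketch (added commentary, not part of the original module) ---
# A hedged, minimal round-trip showing how the store persists an upload stream
# to the temp directory and restores it from session metadata. The stub request
# object below is an assumption made purely for illustration; in production the
# store receives a real Pyramid/websauna request.
def _example_tempstore_roundtrip():  # pragma: no cover
    import io
    import tempfile
    import types
    settings = {'websauna.uploads_tempdir': tempfile.mkdtemp()}
    request = types.SimpleNamespace(
        registry=types.SimpleNamespace(settings=settings),
        session={},  # any mutable mapping is enough for this sketch
    )
    store = FileUploadTempStore(request)
    store['avatar'] = {'filename': 'avatar.png', 'fp': io.BytesIO(b'fake-bytes')}
    restored = store['avatar']    # reopens the temp file as restored['fp']
    return restored['fp'].read()  # -> b'fake-bytes'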
| enkidulan/enkiblog | src/enkiblog/core/deform/tempstorage.py | Python | apache-2.0 | 2,365 |
# -*- coding: utf-8 -*-
# There are tests here with unicode string literals and
# identifiers. There's a code in ast.c that was added because of a
# failure with a non-ascii-only expression. So, I have tests for
# that. There are workarounds that would let me run tests for that
# code without unicode identifiers and strings, but just using them
# directly seems like the easiest and therefore safest thing to do.
# Unicode identifiers in tests is allowed by PEP 3131.
import ast
import types
import decimal
import unittest
a_global = 'global variable'
# You could argue that I'm too strict in looking for specific error
# values with assertRaisesRegex, but without it it's way too easy to
# make a syntax error in the test strings. Especially with all of the
# triple quotes, raw strings, backslashes, etc. I think it's a
# worthwhile tradeoff. When I switched to this method, I found many
# examples where I wasn't testing what I thought I was.
class TestCase(unittest.TestCase):
def assertAllRaise(self, exception_type, regex, error_strings):
for str in error_strings:
with self.subTest(str=str):
with self.assertRaisesRegex(exception_type, regex):
eval(str)
def test__format__lookup(self):
# Make sure __format__ is looked up on the type, not the instance.
class X:
def __format__(self, spec):
return 'class'
x = X()
# Add a bound __format__ method to the 'y' instance, but not
# the 'x' instance.
y = X()
y.__format__ = types.MethodType(lambda self, spec: 'instance', y)
self.assertEqual(f'{y}', format(y))
self.assertEqual(f'{y}', 'class')
self.assertEqual(format(x), format(y))
# __format__ is not called this way, but still make sure it
# returns what we expect (so we can make sure we're bypassing
# it).
self.assertEqual(x.__format__(''), 'class')
self.assertEqual(y.__format__(''), 'instance')
# This is how __format__ is actually called.
self.assertEqual(type(x).__format__(x, ''), 'class')
self.assertEqual(type(y).__format__(y, ''), 'class')
def test_ast(self):
# Inspired by http://bugs.python.org/issue24975
class X:
def __init__(self):
self.called = False
def __call__(self):
self.called = True
return 4
x = X()
expr = """
a = 10
f'{a * x()}'"""
t = ast.parse(expr)
c = compile(t, '', 'exec')
# Make sure x was not called.
self.assertFalse(x.called)
# Actually run the code.
exec(c)
# Make sure x was called.
self.assertTrue(x.called)
def test_ast_line_numbers(self):
expr = """
a = 10
f'{a * x()}'"""
t = ast.parse(expr)
self.assertEqual(type(t), ast.Module)
self.assertEqual(len(t.body), 2)
# check `a = 10`
self.assertEqual(type(t.body[0]), ast.Assign)
self.assertEqual(t.body[0].lineno, 2)
# check `f'...'`
self.assertEqual(type(t.body[1]), ast.Expr)
self.assertEqual(type(t.body[1].value), ast.JoinedStr)
self.assertEqual(len(t.body[1].value.values), 1)
self.assertEqual(type(t.body[1].value.values[0]), ast.FormattedValue)
self.assertEqual(t.body[1].lineno, 3)
self.assertEqual(t.body[1].value.lineno, 3)
self.assertEqual(t.body[1].value.values[0].lineno, 3)
# check the binop location
binop = t.body[1].value.values[0].value
self.assertEqual(type(binop), ast.BinOp)
self.assertEqual(type(binop.left), ast.Name)
self.assertEqual(type(binop.op), ast.Mult)
self.assertEqual(type(binop.right), ast.Call)
self.assertEqual(binop.lineno, 3)
self.assertEqual(binop.left.lineno, 3)
self.assertEqual(binop.right.lineno, 3)
self.assertEqual(binop.col_offset, 3)
self.assertEqual(binop.left.col_offset, 3)
self.assertEqual(binop.right.col_offset, 7)
def test_ast_line_numbers_multiple_formattedvalues(self):
expr = """
f'no formatted values'
f'eggs {a * x()} spam {b + y()}'"""
t = ast.parse(expr)
self.assertEqual(type(t), ast.Module)
self.assertEqual(len(t.body), 2)
# check `f'no formatted value'`
self.assertEqual(type(t.body[0]), ast.Expr)
self.assertEqual(type(t.body[0].value), ast.JoinedStr)
self.assertEqual(t.body[0].lineno, 2)
# check `f'...'`
self.assertEqual(type(t.body[1]), ast.Expr)
self.assertEqual(type(t.body[1].value), ast.JoinedStr)
self.assertEqual(len(t.body[1].value.values), 4)
self.assertEqual(type(t.body[1].value.values[0]), ast.Constant)
self.assertEqual(type(t.body[1].value.values[0].value), str)
self.assertEqual(type(t.body[1].value.values[1]), ast.FormattedValue)
self.assertEqual(type(t.body[1].value.values[2]), ast.Constant)
self.assertEqual(type(t.body[1].value.values[2].value), str)
self.assertEqual(type(t.body[1].value.values[3]), ast.FormattedValue)
self.assertEqual(t.body[1].lineno, 3)
self.assertEqual(t.body[1].value.lineno, 3)
self.assertEqual(t.body[1].value.values[0].lineno, 3)
self.assertEqual(t.body[1].value.values[1].lineno, 3)
self.assertEqual(t.body[1].value.values[2].lineno, 3)
self.assertEqual(t.body[1].value.values[3].lineno, 3)
# check the first binop location
binop1 = t.body[1].value.values[1].value
self.assertEqual(type(binop1), ast.BinOp)
self.assertEqual(type(binop1.left), ast.Name)
self.assertEqual(type(binop1.op), ast.Mult)
self.assertEqual(type(binop1.right), ast.Call)
self.assertEqual(binop1.lineno, 3)
self.assertEqual(binop1.left.lineno, 3)
self.assertEqual(binop1.right.lineno, 3)
self.assertEqual(binop1.col_offset, 8)
self.assertEqual(binop1.left.col_offset, 8)
self.assertEqual(binop1.right.col_offset, 12)
# check the second binop location
binop2 = t.body[1].value.values[3].value
self.assertEqual(type(binop2), ast.BinOp)
self.assertEqual(type(binop2.left), ast.Name)
self.assertEqual(type(binop2.op), ast.Add)
self.assertEqual(type(binop2.right), ast.Call)
self.assertEqual(binop2.lineno, 3)
self.assertEqual(binop2.left.lineno, 3)
self.assertEqual(binop2.right.lineno, 3)
self.assertEqual(binop2.col_offset, 23)
self.assertEqual(binop2.left.col_offset, 23)
self.assertEqual(binop2.right.col_offset, 27)
def test_ast_line_numbers_nested(self):
expr = """
a = 10
f'{a * f"-{x()}-"}'"""
t = ast.parse(expr)
self.assertEqual(type(t), ast.Module)
self.assertEqual(len(t.body), 2)
# check `a = 10`
self.assertEqual(type(t.body[0]), ast.Assign)
self.assertEqual(t.body[0].lineno, 2)
# check `f'...'`
self.assertEqual(type(t.body[1]), ast.Expr)
self.assertEqual(type(t.body[1].value), ast.JoinedStr)
self.assertEqual(len(t.body[1].value.values), 1)
self.assertEqual(type(t.body[1].value.values[0]), ast.FormattedValue)
self.assertEqual(t.body[1].lineno, 3)
self.assertEqual(t.body[1].value.lineno, 3)
self.assertEqual(t.body[1].value.values[0].lineno, 3)
# check the binop location
binop = t.body[1].value.values[0].value
self.assertEqual(type(binop), ast.BinOp)
self.assertEqual(type(binop.left), ast.Name)
self.assertEqual(type(binop.op), ast.Mult)
self.assertEqual(type(binop.right), ast.JoinedStr)
self.assertEqual(binop.lineno, 3)
self.assertEqual(binop.left.lineno, 3)
self.assertEqual(binop.right.lineno, 3)
self.assertEqual(binop.col_offset, 3)
self.assertEqual(binop.left.col_offset, 3)
self.assertEqual(binop.right.col_offset, 7)
# check the nested call location
self.assertEqual(len(binop.right.values), 3)
self.assertEqual(type(binop.right.values[0]), ast.Constant)
self.assertEqual(type(binop.right.values[0].value), str)
self.assertEqual(type(binop.right.values[1]), ast.FormattedValue)
self.assertEqual(type(binop.right.values[2]), ast.Constant)
self.assertEqual(type(binop.right.values[2].value), str)
self.assertEqual(binop.right.values[0].lineno, 3)
self.assertEqual(binop.right.values[1].lineno, 3)
self.assertEqual(binop.right.values[2].lineno, 3)
call = binop.right.values[1].value
self.assertEqual(type(call), ast.Call)
self.assertEqual(call.lineno, 3)
self.assertEqual(call.col_offset, 11)
def test_ast_line_numbers_duplicate_expression(self):
"""Duplicate expression
NOTE: this is currently broken, always sets location of the first
expression.
"""
expr = """
a = 10
f'{a * x()} {a * x()} {a * x()}'
"""
t = ast.parse(expr)
self.assertEqual(type(t), ast.Module)
self.assertEqual(len(t.body), 2)
# check `a = 10`
self.assertEqual(type(t.body[0]), ast.Assign)
self.assertEqual(t.body[0].lineno, 2)
# check `f'...'`
self.assertEqual(type(t.body[1]), ast.Expr)
self.assertEqual(type(t.body[1].value), ast.JoinedStr)
self.assertEqual(len(t.body[1].value.values), 5)
self.assertEqual(type(t.body[1].value.values[0]), ast.FormattedValue)
self.assertEqual(type(t.body[1].value.values[1]), ast.Constant)
self.assertEqual(type(t.body[1].value.values[1].value), str)
self.assertEqual(type(t.body[1].value.values[2]), ast.FormattedValue)
self.assertEqual(type(t.body[1].value.values[3]), ast.Constant)
self.assertEqual(type(t.body[1].value.values[3].value), str)
self.assertEqual(type(t.body[1].value.values[4]), ast.FormattedValue)
self.assertEqual(t.body[1].lineno, 3)
self.assertEqual(t.body[1].value.lineno, 3)
self.assertEqual(t.body[1].value.values[0].lineno, 3)
self.assertEqual(t.body[1].value.values[1].lineno, 3)
self.assertEqual(t.body[1].value.values[2].lineno, 3)
self.assertEqual(t.body[1].value.values[3].lineno, 3)
self.assertEqual(t.body[1].value.values[4].lineno, 3)
# check the first binop location
binop = t.body[1].value.values[0].value
self.assertEqual(type(binop), ast.BinOp)
self.assertEqual(type(binop.left), ast.Name)
self.assertEqual(type(binop.op), ast.Mult)
self.assertEqual(type(binop.right), ast.Call)
self.assertEqual(binop.lineno, 3)
self.assertEqual(binop.left.lineno, 3)
self.assertEqual(binop.right.lineno, 3)
self.assertEqual(binop.col_offset, 3)
self.assertEqual(binop.left.col_offset, 3)
self.assertEqual(binop.right.col_offset, 7)
# check the second binop location
binop = t.body[1].value.values[2].value
self.assertEqual(type(binop), ast.BinOp)
self.assertEqual(type(binop.left), ast.Name)
self.assertEqual(type(binop.op), ast.Mult)
self.assertEqual(type(binop.right), ast.Call)
self.assertEqual(binop.lineno, 3)
self.assertEqual(binop.left.lineno, 3)
self.assertEqual(binop.right.lineno, 3)
self.assertEqual(binop.col_offset, 3) # FIXME: this is wrong
self.assertEqual(binop.left.col_offset, 3) # FIXME: this is wrong
self.assertEqual(binop.right.col_offset, 7) # FIXME: this is wrong
# check the third binop location
binop = t.body[1].value.values[4].value
self.assertEqual(type(binop), ast.BinOp)
self.assertEqual(type(binop.left), ast.Name)
self.assertEqual(type(binop.op), ast.Mult)
self.assertEqual(type(binop.right), ast.Call)
self.assertEqual(binop.lineno, 3)
self.assertEqual(binop.left.lineno, 3)
self.assertEqual(binop.right.lineno, 3)
self.assertEqual(binop.col_offset, 3) # FIXME: this is wrong
self.assertEqual(binop.left.col_offset, 3) # FIXME: this is wrong
self.assertEqual(binop.right.col_offset, 7) # FIXME: this is wrong
def test_ast_line_numbers_multiline_fstring(self):
# See bpo-30465 for details.
expr = """
a = 10
f'''
{a
*
x()}
non-important content
'''
"""
t = ast.parse(expr)
self.assertEqual(type(t), ast.Module)
self.assertEqual(len(t.body), 2)
# check `a = 10`
self.assertEqual(type(t.body[0]), ast.Assign)
self.assertEqual(t.body[0].lineno, 2)
# check `f'...'`
self.assertEqual(type(t.body[1]), ast.Expr)
self.assertEqual(type(t.body[1].value), ast.JoinedStr)
self.assertEqual(len(t.body[1].value.values), 3)
self.assertEqual(type(t.body[1].value.values[0]), ast.Constant)
self.assertEqual(type(t.body[1].value.values[0].value), str)
self.assertEqual(type(t.body[1].value.values[1]), ast.FormattedValue)
self.assertEqual(type(t.body[1].value.values[2]), ast.Constant)
self.assertEqual(type(t.body[1].value.values[2].value), str)
self.assertEqual(t.body[1].lineno, 3)
self.assertEqual(t.body[1].value.lineno, 3)
self.assertEqual(t.body[1].value.values[0].lineno, 3)
self.assertEqual(t.body[1].value.values[1].lineno, 3)
self.assertEqual(t.body[1].value.values[2].lineno, 3)
self.assertEqual(t.body[1].col_offset, 0)
self.assertEqual(t.body[1].value.col_offset, 0)
self.assertEqual(t.body[1].value.values[0].col_offset, 0)
self.assertEqual(t.body[1].value.values[1].col_offset, 0)
self.assertEqual(t.body[1].value.values[2].col_offset, 0)
# NOTE: the following lineno information and col_offset is correct for
# expressions within FormattedValues.
binop = t.body[1].value.values[1].value
self.assertEqual(type(binop), ast.BinOp)
self.assertEqual(type(binop.left), ast.Name)
self.assertEqual(type(binop.op), ast.Mult)
self.assertEqual(type(binop.right), ast.Call)
self.assertEqual(binop.lineno, 4)
self.assertEqual(binop.left.lineno, 4)
self.assertEqual(binop.right.lineno, 6)
self.assertEqual(binop.col_offset, 4)
self.assertEqual(binop.left.col_offset, 4)
self.assertEqual(binop.right.col_offset, 7)
def test_docstring(self):
def f():
f'''Not a docstring'''
self.assertIsNone(f.__doc__)
def g():
'''Not a docstring''' \
f''
self.assertIsNone(g.__doc__)
def test_literal_eval(self):
with self.assertRaisesRegex(ValueError, 'malformed node or string'):
ast.literal_eval("f'x'")
def test_ast_compile_time_concat(self):
x = ['']
expr = """x[0] = 'foo' f'{3}'"""
t = ast.parse(expr)
c = compile(t, '', 'exec')
exec(c)
self.assertEqual(x[0], 'foo3')
def test_compile_time_concat_errors(self):
self.assertAllRaise(SyntaxError,
'cannot mix bytes and nonbytes literals',
[r"""f'' b''""",
r"""b'' f''""",
])
def test_literal(self):
self.assertEqual(f'', '')
self.assertEqual(f'a', 'a')
self.assertEqual(f' ', ' ')
def test_unterminated_string(self):
self.assertAllRaise(SyntaxError, 'f-string: unterminated string',
[r"""f'{"x'""",
r"""f'{"x}'""",
r"""f'{("x'""",
r"""f'{("x}'""",
])
def test_mismatched_parens(self):
self.assertAllRaise(SyntaxError, r"f-string: closing parenthesis '\}' "
r"does not match opening parenthesis '\('",
["f'{((}'",
])
self.assertAllRaise(SyntaxError, r"f-string: closing parenthesis '\)' "
r"does not match opening parenthesis '\['",
["f'{a[4)}'",
])
self.assertAllRaise(SyntaxError, r"f-string: closing parenthesis '\]' "
r"does not match opening parenthesis '\('",
["f'{a(4]}'",
])
self.assertAllRaise(SyntaxError, r"f-string: closing parenthesis '\}' "
r"does not match opening parenthesis '\['",
["f'{a[4}'",
])
self.assertAllRaise(SyntaxError, r"f-string: closing parenthesis '\}' "
r"does not match opening parenthesis '\('",
["f'{a(4}'",
])
self.assertRaises(SyntaxError, eval, "f'{" + "("*500 + "}'")
def test_double_braces(self):
self.assertEqual(f'{{', '{')
self.assertEqual(f'a{{', 'a{')
self.assertEqual(f'{{b', '{b')
self.assertEqual(f'a{{b', 'a{b')
self.assertEqual(f'}}', '}')
self.assertEqual(f'a}}', 'a}')
self.assertEqual(f'}}b', '}b')
self.assertEqual(f'a}}b', 'a}b')
self.assertEqual(f'{{}}', '{}')
self.assertEqual(f'a{{}}', 'a{}')
self.assertEqual(f'{{b}}', '{b}')
self.assertEqual(f'{{}}c', '{}c')
self.assertEqual(f'a{{b}}', 'a{b}')
self.assertEqual(f'a{{}}c', 'a{}c')
self.assertEqual(f'{{b}}c', '{b}c')
self.assertEqual(f'a{{b}}c', 'a{b}c')
self.assertEqual(f'{{{10}', '{10')
self.assertEqual(f'}}{10}', '}10')
self.assertEqual(f'}}{{{10}', '}{10')
self.assertEqual(f'}}a{{{10}', '}a{10')
self.assertEqual(f'{10}{{', '10{')
self.assertEqual(f'{10}}}', '10}')
self.assertEqual(f'{10}}}{{', '10}{')
self.assertEqual(f'{10}}}a{{' '}', '10}a{}')
# Inside of strings, don't interpret doubled brackets.
self.assertEqual(f'{"{{}}"}', '{{}}')
self.assertAllRaise(TypeError, 'unhashable type',
["f'{ {{}} }'", # dict in a set
])
def test_compile_time_concat(self):
x = 'def'
self.assertEqual('abc' f'## {x}ghi', 'abc## defghi')
self.assertEqual('abc' f'{x}' 'ghi', 'abcdefghi')
self.assertEqual('abc' f'{x}' 'gh' f'i{x:4}', 'abcdefghidef ')
self.assertEqual('{x}' f'{x}', '{x}def')
self.assertEqual('{x' f'{x}', '{xdef')
self.assertEqual('{x}' f'{x}', '{x}def')
self.assertEqual('{{x}}' f'{x}', '{{x}}def')
self.assertEqual('{{x' f'{x}', '{{xdef')
self.assertEqual('x}}' f'{x}', 'x}}def')
self.assertEqual(f'{x}' 'x}}', 'defx}}')
self.assertEqual(f'{x}' '', 'def')
self.assertEqual('' f'{x}' '', 'def')
self.assertEqual('' f'{x}', 'def')
self.assertEqual(f'{x}' '2', 'def2')
self.assertEqual('1' f'{x}' '2', '1def2')
self.assertEqual('1' f'{x}', '1def')
self.assertEqual(f'{x}' f'-{x}', 'def-def')
self.assertEqual('' f'', '')
self.assertEqual('' f'' '', '')
self.assertEqual('' f'' '' f'', '')
self.assertEqual(f'', '')
self.assertEqual(f'' '', '')
self.assertEqual(f'' '' f'', '')
self.assertEqual(f'' '' f'' '', '')
self.assertAllRaise(SyntaxError, "f-string: expecting '}'",
["f'{3' f'}'", # can't concat to get a valid f-string
])
def test_comments(self):
# These aren't comments, since they're in strings.
d = {'#': 'hash'}
self.assertEqual(f'{"#"}', '#')
self.assertEqual(f'{d["#"]}', 'hash')
self.assertAllRaise(SyntaxError, "f-string expression part cannot include '#'",
["f'{1#}'", # error because the expression becomes "(1#)"
"f'{3(#)}'",
"f'{#}'",
])
self.assertAllRaise(SyntaxError, r"f-string: unmatched '\)'",
["f'{)#}'", # When wrapped in parens, this becomes
# '()#)'. Make sure that doesn't compile.
])
def test_many_expressions(self):
# Create a string with many expressions in it. Note that
# because we have a space in here as a literal, we're actually
# going to use twice as many ast nodes: one for each literal
# plus one for each expression.
def build_fstr(n, extra=''):
return "f'" + ('{x} ' * n) + extra + "'"
x = 'X'
width = 1
# Test around 256.
for i in range(250, 260):
self.assertEqual(eval(build_fstr(i)), (x+' ')*i)
        # Test concatenating 2 large fstrings.
self.assertEqual(eval(build_fstr(255)*256), (x+' ')*(255*256))
s = build_fstr(253, '{x:{width}} ')
self.assertEqual(eval(s), (x+' ')*254)
# Test lots of expressions and constants, concatenated.
s = "f'{1}' 'x' 'y'" * 1024
self.assertEqual(eval(s), '1xy' * 1024)
def test_format_specifier_expressions(self):
width = 10
precision = 4
value = decimal.Decimal('12.34567')
self.assertEqual(f'result: {value:{width}.{precision}}', 'result: 12.35')
self.assertEqual(f'result: {value:{width!r}.{precision}}', 'result: 12.35')
self.assertEqual(f'result: {value:{width:0}.{precision:1}}', 'result: 12.35')
self.assertEqual(f'result: {value:{1}{0:0}.{precision:1}}', 'result: 12.35')
self.assertEqual(f'result: {value:{ 1}{ 0:0}.{ precision:1}}', 'result: 12.35')
self.assertEqual(f'{10:#{1}0x}', ' 0xa')
self.assertEqual(f'{10:{"#"}1{0}{"x"}}', ' 0xa')
self.assertEqual(f'{-10:-{"#"}1{0}x}', ' -0xa')
self.assertEqual(f'{-10:{"-"}#{1}0{"x"}}', ' -0xa')
self.assertEqual(f'{10:#{3 != {4:5} and width}x}', ' 0xa')
self.assertAllRaise(SyntaxError, "f-string: expecting '}'",
["""f'{"s"!r{":10"}}'""",
# This looks like a nested format spec.
])
self.assertAllRaise(SyntaxError, "invalid syntax",
[# Invalid syntax inside a nested spec.
"f'{4:{/5}}'",
])
self.assertAllRaise(SyntaxError, "f-string: expressions nested too deeply",
[# Can't nest format specifiers.
"f'result: {value:{width:{0}}.{precision:1}}'",
])
self.assertAllRaise(SyntaxError, 'f-string: invalid conversion character',
[# No expansion inside conversion or for
# the : or ! itself.
"""f'{"s"!{"r"}}'""",
])
def test_side_effect_order(self):
class X:
def __init__(self):
self.i = 0
def __format__(self, spec):
self.i += 1
return str(self.i)
x = X()
self.assertEqual(f'{x} {x}', '1 2')
def test_missing_expression(self):
self.assertAllRaise(SyntaxError, 'f-string: empty expression not allowed',
                            ["f'{}'",
                             "f'{ }'",
                             "f' {} '",
"f'{!r}'",
"f'{ !r}'",
"f'{10:{ }}'",
"f' { } '",
                             # The Python parser also ignores the following
                             # whitespace characters in addition to a space.
"f'''{\t\f\r\n}'''",
# Catch the empty expression before the
# invalid conversion.
"f'{!x}'",
"f'{ !xr}'",
"f'{!x:}'",
"f'{!x:a}'",
"f'{ !xr:}'",
"f'{ !xr:a}'",
"f'{!}'",
"f'{:}'",
# We find the empty expression before the
# missing closing brace.
"f'{!'",
"f'{!s:'",
"f'{:'",
"f'{:x'",
])
# Different error message is raised for other whitespace characters.
self.assertAllRaise(SyntaxError, 'invalid character in identifier',
["f'''{\xa0}'''",
"\xa0",
])
def test_parens_in_expressions(self):
self.assertEqual(f'{3,}', '(3,)')
# Add these because when an expression is evaluated, parens
# are added around it. But we shouldn't go from an invalid
# expression to a valid one. The added parens are just
# supposed to allow whitespace (including newlines).
self.assertAllRaise(SyntaxError, 'invalid syntax',
["f'{,}'",
"f'{,}'", # this is (,), which is an error
])
self.assertAllRaise(SyntaxError, r"f-string: unmatched '\)'",
["f'{3)+(4}'",
])
self.assertAllRaise(SyntaxError, 'EOL while scanning string literal',
["f'{\n}'",
])
def test_backslashes_in_string_part(self):
self.assertEqual(f'\t', '\t')
self.assertEqual(r'\t', '\\t')
self.assertEqual(rf'\t', '\\t')
self.assertEqual(f'{2}\t', '2\t')
self.assertEqual(f'{2}\t{3}', '2\t3')
self.assertEqual(f'\t{3}', '\t3')
self.assertEqual(f'\u0394', '\u0394')
self.assertEqual(r'\u0394', '\\u0394')
self.assertEqual(rf'\u0394', '\\u0394')
self.assertEqual(f'{2}\u0394', '2\u0394')
self.assertEqual(f'{2}\u0394{3}', '2\u03943')
self.assertEqual(f'\u0394{3}', '\u03943')
self.assertEqual(f'\U00000394', '\u0394')
self.assertEqual(r'\U00000394', '\\U00000394')
self.assertEqual(rf'\U00000394', '\\U00000394')
self.assertEqual(f'{2}\U00000394', '2\u0394')
self.assertEqual(f'{2}\U00000394{3}', '2\u03943')
self.assertEqual(f'\U00000394{3}', '\u03943')
self.assertEqual(f'\N{GREEK CAPITAL LETTER DELTA}', '\u0394')
self.assertEqual(f'{2}\N{GREEK CAPITAL LETTER DELTA}', '2\u0394')
self.assertEqual(f'{2}\N{GREEK CAPITAL LETTER DELTA}{3}', '2\u03943')
self.assertEqual(f'\N{GREEK CAPITAL LETTER DELTA}{3}', '\u03943')
self.assertEqual(f'2\N{GREEK CAPITAL LETTER DELTA}', '2\u0394')
self.assertEqual(f'2\N{GREEK CAPITAL LETTER DELTA}3', '2\u03943')
self.assertEqual(f'\N{GREEK CAPITAL LETTER DELTA}3', '\u03943')
self.assertEqual(f'\x20', ' ')
self.assertEqual(r'\x20', '\\x20')
self.assertEqual(rf'\x20', '\\x20')
self.assertEqual(f'{2}\x20', '2 ')
self.assertEqual(f'{2}\x20{3}', '2 3')
self.assertEqual(f'\x20{3}', ' 3')
self.assertEqual(f'2\x20', '2 ')
self.assertEqual(f'2\x203', '2 3')
self.assertEqual(f'\x203', ' 3')
with self.assertWarns(DeprecationWarning): # invalid escape sequence
value = eval(r"f'\{6*7}'")
self.assertEqual(value, '\\42')
self.assertEqual(f'\\{6*7}', '\\42')
self.assertEqual(fr'\{6*7}', '\\42')
AMPERSAND = 'spam'
# Get the right unicode character (&), or pick up local variable
# depending on the number of backslashes.
self.assertEqual(f'\N{AMPERSAND}', '&')
self.assertEqual(f'\\N{AMPERSAND}', '\\Nspam')
self.assertEqual(fr'\N{AMPERSAND}', '\\Nspam')
self.assertEqual(f'\\\N{AMPERSAND}', '\\&')
def test_misformed_unicode_character_name(self):
# These test are needed because unicode names are parsed
# differently inside f-strings.
self.assertAllRaise(SyntaxError, r"\(unicode error\) 'unicodeescape' codec can't decode bytes in position .*: malformed \\N character escape",
[r"f'\N'",
r"f'\N{'",
r"f'\N{GREEK CAPITAL LETTER DELTA'",
# Here are the non-f-string versions,
# which should give the same errors.
r"'\N'",
r"'\N{'",
r"'\N{GREEK CAPITAL LETTER DELTA'",
])
def test_no_backslashes_in_expression_part(self):
self.assertAllRaise(SyntaxError, 'f-string expression part cannot include a backslash',
[r"f'{\'a\'}'",
r"f'{\t3}'",
r"f'{\}'",
r"rf'{\'a\'}'",
r"rf'{\t3}'",
r"rf'{\}'",
r"""rf'{"\N{LEFT CURLY BRACKET}"}'""",
r"f'{\n}'",
])
def test_no_escapes_for_braces(self):
"""
Only literal curly braces begin an expression.
"""
# \x7b is '{'.
self.assertEqual(f'\x7b1+1}}', '{1+1}')
self.assertEqual(f'\x7b1+1', '{1+1')
self.assertEqual(f'\u007b1+1', '{1+1')
self.assertEqual(f'\N{LEFT CURLY BRACKET}1+1\N{RIGHT CURLY BRACKET}', '{1+1}')
def test_newlines_in_expressions(self):
self.assertEqual(f'{0}', '0')
self.assertEqual(rf'''{3+
4}''', '7')
def test_lambda(self):
x = 5
self.assertEqual(f'{(lambda y:x*y)("8")!r}', "'88888'")
self.assertEqual(f'{(lambda y:x*y)("8")!r:10}', "'88888' ")
self.assertEqual(f'{(lambda y:x*y)("8"):10}', "88888 ")
# lambda doesn't work without parens, because the colon
# makes the parser think it's a format_spec
self.assertAllRaise(SyntaxError, 'unexpected EOF while parsing',
["f'{lambda x:x}'",
])
def test_yield(self):
# Not terribly useful, but make sure the yield turns
# a function into a generator
def fn(y):
f'y:{yield y*2}'
g = fn(4)
self.assertEqual(next(g), 8)
def test_yield_send(self):
def fn(x):
yield f'x:{yield (lambda i: x * i)}'
g = fn(10)
the_lambda = next(g)
self.assertEqual(the_lambda(4), 40)
self.assertEqual(g.send('string'), 'x:string')
def test_expressions_with_triple_quoted_strings(self):
self.assertEqual(f"{'''x'''}", 'x')
self.assertEqual(f"{'''eric's'''}", "eric's")
# Test concatenation within an expression
self.assertEqual(f'{"x" """eric"s""" "y"}', 'xeric"sy')
self.assertEqual(f'{"x" """eric"s"""}', 'xeric"s')
self.assertEqual(f'{"""eric"s""" "y"}', 'eric"sy')
self.assertEqual(f'{"""x""" """eric"s""" "y"}', 'xeric"sy')
self.assertEqual(f'{"""x""" """eric"s""" """y"""}', 'xeric"sy')
self.assertEqual(f'{r"""x""" """eric"s""" """y"""}', 'xeric"sy')
def test_multiple_vars(self):
x = 98
y = 'abc'
self.assertEqual(f'{x}{y}', '98abc')
self.assertEqual(f'X{x}{y}', 'X98abc')
self.assertEqual(f'{x}X{y}', '98Xabc')
self.assertEqual(f'{x}{y}X', '98abcX')
self.assertEqual(f'X{x}Y{y}', 'X98Yabc')
self.assertEqual(f'X{x}{y}Y', 'X98abcY')
self.assertEqual(f'{x}X{y}Y', '98XabcY')
self.assertEqual(f'X{x}Y{y}Z', 'X98YabcZ')
def test_closure(self):
def outer(x):
def inner():
return f'x:{x}'
return inner
self.assertEqual(outer('987')(), 'x:987')
self.assertEqual(outer(7)(), 'x:7')
def test_arguments(self):
y = 2
def f(x, width):
return f'x={x*y:{width}}'
self.assertEqual(f('foo', 10), 'x=foofoo ')
x = 'bar'
self.assertEqual(f(10, 10), 'x= 20')
def test_locals(self):
value = 123
self.assertEqual(f'v:{value}', 'v:123')
def test_missing_variable(self):
with self.assertRaises(NameError):
f'v:{value}'
def test_missing_format_spec(self):
class O:
def __format__(self, spec):
if not spec:
return '*'
return spec
self.assertEqual(f'{O():x}', 'x')
self.assertEqual(f'{O()}', '*')
self.assertEqual(f'{O():}', '*')
self.assertEqual(f'{3:}', '3')
self.assertEqual(f'{3!s:}', '3')
def test_global(self):
self.assertEqual(f'g:{a_global}', 'g:global variable')
self.assertEqual(f'g:{a_global!r}', "g:'global variable'")
a_local = 'local variable'
self.assertEqual(f'g:{a_global} l:{a_local}',
'g:global variable l:local variable')
self.assertEqual(f'g:{a_global!r}',
"g:'global variable'")
self.assertEqual(f'g:{a_global} l:{a_local!r}',
"g:global variable l:'local variable'")
self.assertIn("module 'unittest' from", f'{unittest}')
def test_shadowed_global(self):
a_global = 'really a local'
self.assertEqual(f'g:{a_global}', 'g:really a local')
self.assertEqual(f'g:{a_global!r}', "g:'really a local'")
a_local = 'local variable'
self.assertEqual(f'g:{a_global} l:{a_local}',
'g:really a local l:local variable')
self.assertEqual(f'g:{a_global!r}',
"g:'really a local'")
self.assertEqual(f'g:{a_global} l:{a_local!r}',
"g:really a local l:'local variable'")
def test_call(self):
def foo(x):
return 'x=' + str(x)
self.assertEqual(f'{foo(10)}', 'x=10')
def test_nested_fstrings(self):
y = 5
self.assertEqual(f'{f"{0}"*3}', '000')
self.assertEqual(f'{f"{y}"*3}', '555')
def test_invalid_string_prefixes(self):
self.assertAllRaise(SyntaxError, 'unexpected EOF while parsing',
["fu''",
"uf''",
"Fu''",
"fU''",
"Uf''",
"uF''",
"ufr''",
"urf''",
"fur''",
"fru''",
"rfu''",
"ruf''",
"FUR''",
"Fur''",
"fb''",
"fB''",
"Fb''",
"FB''",
"bf''",
"bF''",
"Bf''",
"BF''",
])
def test_leading_trailing_spaces(self):
self.assertEqual(f'{ 3}', '3')
self.assertEqual(f'{ 3}', '3')
self.assertEqual(f'{3 }', '3')
self.assertEqual(f'{3 }', '3')
self.assertEqual(f'expr={ {x: y for x, y in [(1, 2), ]}}',
'expr={1: 2}')
self.assertEqual(f'expr={ {x: y for x, y in [(1, 2), ]} }',
'expr={1: 2}')
def test_not_equal(self):
# There's a special test for this because there's a special
# case in the f-string parser to look for != as not ending an
# expression. Normally it would, while looking for !s or !r.
self.assertEqual(f'{3!=4}', 'True')
self.assertEqual(f'{3!=4:}', 'True')
self.assertEqual(f'{3!=4!s}', 'True')
self.assertEqual(f'{3!=4!s:.3}', 'Tru')
def test_equal_equal(self):
# Because an expression ending in = has special meaning,
# there's a special test for ==. Make sure it works.
self.assertEqual(f'{0==1}', 'False')
def test_conversions(self):
self.assertEqual(f'{3.14:10.10}', ' 3.14')
self.assertEqual(f'{3.14!s:10.10}', '3.14 ')
self.assertEqual(f'{3.14!r:10.10}', '3.14 ')
self.assertEqual(f'{3.14!a:10.10}', '3.14 ')
self.assertEqual(f'{"a"}', 'a')
self.assertEqual(f'{"a"!r}', "'a'")
self.assertEqual(f'{"a"!a}', "'a'")
# Not a conversion.
self.assertEqual(f'{"a!r"}', "a!r")
# Not a conversion, but show that ! is allowed in a format spec.
self.assertEqual(f'{3.14:!<10.10}', '3.14!!!!!!')
self.assertAllRaise(SyntaxError, 'f-string: invalid conversion character',
["f'{3!g}'",
"f'{3!A}'",
"f'{3!3}'",
"f'{3!G}'",
"f'{3!!}'",
"f'{3!:}'",
"f'{3! s}'", # no space before conversion char
])
self.assertAllRaise(SyntaxError, "f-string: expecting '}'",
["f'{x!s{y}}'",
"f'{3!ss}'",
"f'{3!ss:}'",
"f'{3!ss:s}'",
])
def test_assignment(self):
self.assertAllRaise(SyntaxError, 'invalid syntax',
["f'' = 3",
"f'{0}' = x",
"f'{x}' = x",
])
def test_del(self):
self.assertAllRaise(SyntaxError, 'invalid syntax',
["del f''",
"del '' f''",
])
def test_mismatched_braces(self):
self.assertAllRaise(SyntaxError, "f-string: single '}' is not allowed",
["f'{{}'",
"f'{{}}}'",
"f'}'",
"f'x}'",
"f'x}x'",
r"f'\u007b}'",
# Can't have { or } in a format spec.
"f'{3:}>10}'",
"f'{3:}}>10}'",
])
self.assertAllRaise(SyntaxError, "f-string: expecting '}'",
["f'{3:{{>10}'",
"f'{3'",
"f'{3!'",
"f'{3:'",
"f'{3!s'",
"f'{3!s:'",
"f'{3!s:3'",
"f'x{'",
"f'x{x'",
"f'{x'",
"f'{3:s'",
"f'{{{'",
"f'{{}}{'",
"f'{'",
])
# But these are just normal strings.
self.assertEqual(f'{"{"}', '{')
self.assertEqual(f'{"}"}', '}')
self.assertEqual(f'{3:{"}"}>10}', '}}}}}}}}}3')
self.assertEqual(f'{2:{"{"}>10}', '{{{{{{{{{2')
def test_if_conditional(self):
# There's special logic in compile.c to test if the
# conditional for an if (and while) are constants. Exercise
# that code.
def test_fstring(x, expected):
flag = 0
if f'{x}':
flag = 1
else:
flag = 2
self.assertEqual(flag, expected)
def test_concat_empty(x, expected):
flag = 0
if '' f'{x}':
flag = 1
else:
flag = 2
self.assertEqual(flag, expected)
def test_concat_non_empty(x, expected):
flag = 0
if ' ' f'{x}':
flag = 1
else:
flag = 2
self.assertEqual(flag, expected)
test_fstring('', 2)
test_fstring(' ', 1)
test_concat_empty('', 2)
test_concat_empty(' ', 1)
test_concat_non_empty('', 1)
test_concat_non_empty(' ', 1)
def test_empty_format_specifier(self):
x = 'test'
self.assertEqual(f'{x}', 'test')
self.assertEqual(f'{x:}', 'test')
self.assertEqual(f'{x!s:}', 'test')
self.assertEqual(f'{x!r:}', "'test'")
def test_str_format_differences(self):
d = {'a': 'string',
0: 'integer',
}
a = 0
self.assertEqual(f'{d[0]}', 'integer')
self.assertEqual(f'{d["a"]}', 'string')
self.assertEqual(f'{d[a]}', 'integer')
self.assertEqual('{d[a]}'.format(d=d), 'string')
self.assertEqual('{d[0]}'.format(d=d), 'integer')
def test_errors(self):
# see issue 26287
self.assertAllRaise(TypeError, 'unsupported',
[r"f'{(lambda: 0):x}'",
r"f'{(0,):x}'",
])
self.assertAllRaise(ValueError, 'Unknown format code',
[r"f'{1000:j}'",
r"f'{1000:j}'",
])
def test_loop(self):
for i in range(1000):
self.assertEqual(f'i:{i}', 'i:' + str(i))
def test_dict(self):
d = {'"': 'dquote',
"'": 'squote',
'foo': 'bar',
}
self.assertEqual(f'''{d["'"]}''', 'squote')
self.assertEqual(f"""{d['"']}""", 'dquote')
self.assertEqual(f'{d["foo"]}', 'bar')
self.assertEqual(f"{d['foo']}", 'bar')
def test_backslash_char(self):
# Check eval of a backslash followed by a control char.
# See bpo-30682: this used to raise an assert in pydebug mode.
self.assertEqual(eval('f"\\\n"'), '')
self.assertEqual(eval('f"\\\r"'), '')
def test_debug_conversion(self):
x = 'A string'
self.assertEqual(f'{x=}', 'x=' + repr(x))
self.assertEqual(f'{x =}', 'x =' + repr(x))
self.assertEqual(f'{x=!s}', 'x=' + str(x))
self.assertEqual(f'{x=!r}', 'x=' + repr(x))
self.assertEqual(f'{x=!a}', 'x=' + ascii(x))
x = 2.71828
self.assertEqual(f'{x=:.2f}', 'x=' + format(x, '.2f'))
self.assertEqual(f'{x=:}', 'x=' + format(x, ''))
self.assertEqual(f'{x=!r:^20}', 'x=' + format(repr(x), '^20'))
self.assertEqual(f'{x=!s:^20}', 'x=' + format(str(x), '^20'))
self.assertEqual(f'{x=!a:^20}', 'x=' + format(ascii(x), '^20'))
x = 9
self.assertEqual(f'{3*x+15=}', '3*x+15=42')
# There is code in ast.c that deals with non-ascii expression values. So,
# use a unicode identifier to trigger that.
tenπ = 31.4
self.assertEqual(f'{tenπ=:.2f}', 'tenπ=31.40')
# Also test with Unicode in non-identifiers.
self.assertEqual(f'{"Σ"=}', '"Σ"=\'Σ\'')
# Make sure nested fstrings still work.
self.assertEqual(f'{f"{3.1415=:.1f}":*^20}', '*****3.1415=3.1*****')
# Make sure text before and after an expression with = works
# correctly.
pi = 'π'
self.assertEqual(f'alpha α {pi=} ω omega', "alpha α pi='π' ω omega")
# Check multi-line expressions.
self.assertEqual(f'''{
3
=}''', '\n3\n=3')
# Since = is handled specially, make sure all existing uses of
# it still work.
self.assertEqual(f'{0==1}', 'False')
self.assertEqual(f'{0!=1}', 'True')
self.assertEqual(f'{0<=1}', 'True')
self.assertEqual(f'{0>=1}', 'False')
self.assertEqual(f'{(x:="5")}', '5')
self.assertEqual(x, '5')
self.assertEqual(f'{(x:=5)}', '5')
self.assertEqual(x, 5)
self.assertEqual(f'{"="}', '=')
x = 20
# This isn't an assignment expression, it's 'x', with a format
# spec of '=10'. See test_walrus: you need to use parens.
self.assertEqual(f'{x:=10}', ' 20')
# Test named function parameters, to make sure '=' parsing works
# there.
def f(a):
nonlocal x
oldx = x
x = a
return oldx
x = 0
self.assertEqual(f'{f(a="3=")}', '0')
self.assertEqual(x, '3=')
self.assertEqual(f'{f(a=4)}', '3=')
self.assertEqual(x, 4)
# Make sure __format__ is being called.
class C:
def __format__(self, s):
return f'FORMAT-{s}'
def __repr__(self):
return 'REPR'
self.assertEqual(f'{C()=}', 'C()=REPR')
self.assertEqual(f'{C()=!r}', 'C()=REPR')
self.assertEqual(f'{C()=:}', 'C()=FORMAT-')
self.assertEqual(f'{C()=: }', 'C()=FORMAT- ')
self.assertEqual(f'{C()=:x}', 'C()=FORMAT-x')
self.assertEqual(f'{C()=!r:*^20}', 'C()=********REPR********')
self.assertRaises(SyntaxError, eval, "f'{C=]'")
# Make sure leading and following text works.
x = 'foo'
self.assertEqual(f'X{x=}Y', 'Xx='+repr(x)+'Y')
# Make sure whitespace around the = works.
self.assertEqual(f'X{x =}Y', 'Xx ='+repr(x)+'Y')
self.assertEqual(f'X{x= }Y', 'Xx= '+repr(x)+'Y')
self.assertEqual(f'X{x = }Y', 'Xx = '+repr(x)+'Y')
        # These next lines contain tabs. Backslash escapes don't
# work in f-strings.
# patchcheck doesn't like these tabs. So the only way to test
        # this will be to dynamically create and exec the f-strings. But
# that's such a hassle I'll save it for another day. For now, convert
# the tabs to spaces just to shut up patchcheck.
#self.assertEqual(f'X{x =}Y', 'Xx\t='+repr(x)+'Y')
#self.assertEqual(f'X{x = }Y', 'Xx\t=\t'+repr(x)+'Y')
def test_walrus(self):
x = 20
# This isn't an assignment expression, it's 'x', with a format
# spec of '=10'.
self.assertEqual(f'{x:=10}', ' 20')
# This is an assignment expression, which requires parens.
self.assertEqual(f'{(x:=10)}', '10')
self.assertEqual(x, 10)
if __name__ == '__main__':
unittest.main()
| batermj/algorithm-challenger | code-analysis/programming_anguage/python/source_codes/Python3.8.0/Python-3.8.0/Lib/test/test_fstring.py | Python | apache-2.0 | 47,266 |
import tensorflow as tf
import os
import sys
from copy import copy
from model.pipeline import Pipeline
from tensorflow.python import debug as tf_debug
if __name__ == "__main__":
num_keypoints = 30
patch_feature_dim = 8
decoding_levels = 5
kp_transform_loss = 1e4
base_recon_weight = 0.1
recon_weight = Pipeline.ValueScheduler(
"piecewise_constant",
[100000, 200000],
[base_recon_weight, base_recon_weight*100, base_recon_weight*1000]
)
base_learning_rate=0.01
learning_rate = Pipeline.ValueScheduler(
"piecewise_constant",
[100000, 200000],
[base_learning_rate, base_learning_rate*0.1, base_learning_rate*0.01]
)
keypoint_separation_bandwidth=0.04
keypoint_separation_loss_weight = 10
opt = {
"optimizer": "Adam",
"data_name": "celeba_mafl_100x100_80x80",
"recon_name": "gaussian_fixedvar_in_01",
"encoder_name": "general_80x80",
"decoder_name": "general_80x80",
"latent_dim": num_keypoints*2+(num_keypoints+1)*patch_feature_dim,
"train_color_jittering": True,
"train_random_mirroring": False,
"train_batch_size": 8,
"train_shuffle_capacity": 1000,
"learning_rate": learning_rate,
"max_epochs": 2000,
"weight_decay": 1e-6,
"test_steps": 5000,
"test_limit": 200,
"recon_weight": recon_weight,
}
opt["encoder_options"] = {
"keypoint_num": num_keypoints,
"patch_feature_dim": patch_feature_dim,
"ae_recon_type": opt["recon_name"],
"keypoint_concentration_loss_weight": 100.,
"keypoint_axis_balancing_loss_weight": 200.,
"keypoint_separation_loss_weight": keypoint_separation_loss_weight,
"keypoint_separation_bandwidth": keypoint_separation_bandwidth,
"keypoint_transform_loss_weight": kp_transform_loss,
"keypoint_decoding_heatmap_levels": decoding_levels,
"keypoint_decoding_heatmap_level_base": 0.5**(1/2),
"image_channels": 3,
}
opt["decoder_options"] = copy(opt["encoder_options"])
# -------------------------------------
model_dir = os.path.join("results/celeba_30")
vp = Pipeline(None, opt, model_dir=model_dir)
print(vp.opt)
with vp.graph.as_default():
sess = vp.create_session()
vp.run_full_train(sess, restore=True)
vp.run_full_test(sess)
| YutingZhang/lmdis-rep | exp-ae-celeba-mafl-30.py | Python | apache-2.0 | 2,477 |
# Copyright 2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from tempest.api.compute.floating_ips import base
from tempest import config
from tempest.lib.common.utils import data_utils
from tempest.lib import decorators
from tempest.lib import exceptions as lib_exc
CONF = config.CONF
class FloatingIPDetailsNegativeTestJSON(base.BaseFloatingIPsTest):
"""Negative tests of floating ip detail
Negative tests of floating ip detail with compute microversion less
than 2.36.
"""
max_microversion = '2.35'
@decorators.attr(type=['negative'])
@decorators.idempotent_id('7ab18834-4a4b-4f28-a2c5-440579866695')
def test_get_nonexistent_floating_ip_details(self):
"""Test getting non existent floating ip should fail"""
# Creating a non-existent floatingIP id
if CONF.service_available.neutron:
non_exist_id = data_utils.rand_uuid()
else:
non_exist_id = data_utils.rand_int_id(start=999)
self.assertRaises(lib_exc.NotFound,
self.client.show_floating_ip, non_exist_id)
| openstack/tempest | tempest/api/compute/floating_ips/test_list_floating_ips_negative.py | Python | apache-2.0 | 1,663 |
"""Controlles a bunch of remotes."""
import asyncio
import functools
import logging
import os
import pathlib
import signal
import sys
import traceback
from implant import commands, connect, core, testing
log = logging.getLogger(__name__)
PLUGINS_ENTRY_POINT_GROUP = 'implant.plugins'
def parse_command(line):
"""Parse a command from line."""
args = []
kwargs = {}
command, *parts = line.split(' ')
for part in parts:
if '=' in part:
k, v = part.split('=')
kwargs[k] = v
else:
args.append(part)
return command, args, kwargs
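# Illustrative only (the example input is invented for this note, not used
# elsewhere in the module): parse_command() splits on spaces and routes "k=v"
# parts to kwargs, the rest to positional args, e.g.
#   parse_command('implant.commands:Echo data=bar verbose')
#   -> ('implant.commands:Echo', ['verbose'], {'data': 'bar'})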
async def _execute_command(io_queues, line):
default_lines = {
b'e\n': (b'implant.commands:Echo data=bar\n', {}),
b'i\n': (b'implant.core:InvokeImport fullname=implant.commands\n', {}),
b'\n': (b'implant.commands:SystemLoad data=bar\n', {}),
}
if line in default_lines:
line, _ = default_lines[line]
command_name, _, params = parse_command(line[:-1].decode())
log.info("sending: %s %s", command_name, params)
try:
result = await io_queues.execute(command_name, **params)
except Exception as ex: # noqa
log.error("Error:\n%s", traceback.format_exc())
else:
return result
async def log_remote_stderr(remote):
# await remote.launched()
if remote.stderr:
log.info("Logging remote stderr: %s", remote)
async for line in remote.stderr:
log.debug("\tRemote #%s: %s", remote.pid, line[:-1].decode())
class Console:
def __init__(self, connectors, *, loop=None, **options):
self.loop = loop if loop is not None else asyncio.get_event_loop()
self.options = options
self.connectors = connectors
async def feed_stdin_to_remotes(self, remotes):
try:
async with core.Incomming(pipe=sys.stdin, loop=self.loop) as reader:
while True:
line = await reader.readline()
if line == b'':
break
result = await asyncio.gather(
*(_execute_command(remote, line) for remote, *_ in remotes.values()),
loop=self.loop
)
print("< {}\n >".format(result), end="")
except asyncio.CancelledError:
log.info("Terminating...")
except Exception as ex:
log.info(ex)
for remote, fut_remote, error_log in remotes.values():
fut_remote.cancel()
await fut_remote
error_log.cancel()
await error_log
async def connect(self):
remotes = {}
for connector, default_args in self.connectors.items():
if remotes.get(connector, None) is not None:
log.warning('Process for %s already launched! Skipping...', connector)
continue
remote = await connector.launch(
options=self.options, **default_args, loop=self.loop
)
fut_remote = asyncio.ensure_future(remote.communicate(), loop=self.loop)
error_log = asyncio.ensure_future(log_remote_stderr(remote), loop=self.loop)
remotes[connector] = (remote, fut_remote, error_log)
return remotes
async def run(self):
never_ending = asyncio.Future(loop=self.loop)
remotes = await self.connect()
feeder = asyncio.ensure_future(self.feed_stdin_to_remotes(remotes), loop=self.loop)
def _sigint_handler():
log.info('SIGINT...')
never_ending.cancel()
self.loop.add_signal_handler(signal.SIGINT, _sigint_handler)
try:
await never_ending
except asyncio.CancelledError:
log.debug('Cancelled')
pass
feeder.cancel()
await feeder
def main(debug=False, log_config=None):
log.info('deballator master process: %s', os.getpid())
loop = asyncio.get_event_loop()
# replace existing signal handler with noop as long as our remotes are not fully running
# otherwise cancellation of process startup will lead to orphaned remote processes
def noop():
log.error('Noop on signal SIGINT')
loop.add_signal_handler(signal.SIGINT, noop)
options = {
'debug': debug,
'log_config': log_config,
# 'venv': False,
# 'venv': True,
# 'venv': '~/.implant',
}
# if debug:
# log.setLevel(logging.DEBUG)
console = Console({
# testing.PipeConnector(loop=loop): {},
connect.Local(): {
'python_bin': pathlib.Path('~/.pyenv/versions/3.5.2/bin/python').expanduser(),
},
# connect.Ssh(hostname='localhost'): {
# 'python_bin': pathlib.Path('~/.pyenv/versions/3.5.2/bin/python').expanduser(),
# },
# connect.Lxd(
# container='zesty',
# hostname='localhost',
# ): {
# 'python_bin': pathlib.Path('/usr/bin/python3').expanduser()
# },
}, loop=loop, **options)
task = asyncio.ensure_future(console.run())
try:
loop.run_until_complete(task)
except KeyboardInterrupt:
log.error('Keyboard interrupt...')
task.cancel()
loop.run_until_complete(task)
except BaseException as ex:
core.log.error("Error %s:\n%s", type(ex), traceback.format_exc())
finally:
for task in asyncio.Task.all_tasks():
if not task.done():
log.error("pending: %s", task)
log.info(' - '.join(["this is the end"] * 3))
loop.stop()
loop.close()
| diefans/debellator | src/implant/master.py | Python | apache-2.0 | 5,656 |
#!/usr/bin/env python3
from argparse import ArgumentParser
from os import environ
from sys import argv
from requests import put
def read_file(file_path): # pragma: no cover
with open(file_path, 'rb') as f:
return f.read()
def upload(file_path, repository, repository_path, url, username, password):
url = f'{url}/repository/{repository}/{repository_path}'
data = read_file(file_path)
headers = {'Content-Type': 'application/octet-stream'}
response = put(url, data=data, headers=headers, auth=(username, password))
if response.status_code != 201:
raise OSError(f'{response.status_code}, {response.content}')
return response.status_code
def parse_args(args):
    parser = ArgumentParser(description='Upload assets')
parser.add_argument('file_path', help='File to upload, e.g. ./myartifact-1.0.0.jar')
parser.add_argument('repository', help='Nexus3 repository, e.g. maven-releases')
parser.add_argument('repository_path',
help='Path within Nexus3 repository, e.g com/myorg/myartifact/1.0.0/myartifact-1.0.0.jar')
parser.add_argument('-l', '--url', default=environ.get('NEXUS3_REST_URL', None),
help='Nexus3 url, e.g. http://nexus_host:8080')
parser.add_argument('-u', '--username', default=environ.get('NEXUS3_USERNAME', None), help='Nexus3 username')
parser.add_argument('-p', '--password', default=environ.get('NEXUS3_PASSWORD', None), help='Nexus3 password')
return parser.parse_args(args)
def main(file_path, repository, repository_path, url, username, password):
print(upload(file_path, repository, repository_path, url, username, password))
if __name__ == '__main__': # pragma: no cover
main(**parse_args(argv[1:]).__dict__)
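# Hypothetical invocation (argument values taken from the --help texts above,
# credentials invented), assuming a reachable Nexus3 instance; on success the
# script prints the 201 status code returned by the PUT:
#   ./upload.py ./myartifact-1.0.0.jar maven-releases \
#       com/myorg/myartifact/1.0.0/myartifact-1.0.0.jar \
#       -l http://nexus_host:8080 -u admin -p secret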
| bjuvensjo/scripts | vang/nexus3/upload.py | Python | apache-2.0 | 1,766 |
from ghizmo.commands import lib
def tags(config, args):
"""
List all tags.
"""
return config.repo.tags()
def show_tags(config, args):
"""
Show info for tags supplied on stdin.
"""
for item in lib.input_json_lines():
yield config.repo.tag(item)
def _delete_ref(repo, ref_name, force, dry_run):
ref = repo.ref(ref_name)
if not ref and not force:
raise ValueError("Reference not found: %s" % ref_name)
if not dry_run:
ref.delete()
return lib.status("Deleted %s" % ref_name, dry_run=dry_run)
def branches(config, args):
"""
List all branches.
"""
return config.repo.branches()
def branches_full(config, args):
"""
List full info about all branches.
"""
for b in config.repo.branches():
yield config.repo.branch(b.name)
def show_branches(config, args):
"""
Show branches supplied on stdin.
"""
for item in lib.input_json_lines():
yield config.repo.branch(item)
def delete_branches(config, args):
"""
Delete branches supplied on stdin.
"""
for ref_name in lib.input_json_lines():
yield _delete_ref(config.repo, "heads/" + ref_name, args.force, args.dry_run)
def refs(config, args):
"""
List all refs.
"""
return config.repo.refs()
def show_refs(config, args):
"""
Show refs supplied on stdin.
"""
for item in lib.input_json_lines():
yield config.repo.ref(item)
def delete_refs(config, args):
"""
Delete refs supplied on stdin.
"""
for ref_name in lib.input_json_lines():
yield _delete_ref(config.repo, ref_name, args.force, args.dry_run)
def pull_requests(config, args):
"""
List all PRs.
"""
return config.repo.pull_requests(state=args.get("state", "open"))
def contributors(config, args):
"""
List all contributors.
"""
return config.repo.contributors()
def contributor_stats(config, args):
"""
List contributor statistics.
"""
return config.repo.contributor_statistics()
def collaborators(config, args):
"""
List all collaborators.
"""
return config.repo.collaborators()
def releases(config, args):
"""
List all releases.
"""
return config.repo.releases()
def stargazers(config, args):
"""
List all stargazers.
"""
return config.repo.stargazers()
def create_release(config, args):
"""
Create a new release.
"""
yield config.repo.create_release(args.tag_name, name=args.name,
target_commitish=args.get("target_commitish"), body=args.get("body"),
draft=args.get_bool("draft"), prerelease=args.get_bool("prerelease"))
def issues(config, args):
"""
List issues.
"""
return config.repo.issues(milestone=args.get("milestone"), state=args.get("state"),
assignee=args.get("assignee"), mentioned=args.get("mentioned"),
labels=args.get("labels"), sort=args.get("sort"),
direction=args.get("direction"), since=args.get("since"),
number=args.get("number", -1))
| jlevy/ghizmo | ghizmo/commands/repo.py | Python | apache-2.0 | 3,040 |
import doctest
from maiden import config
def load_tests(loader, tests, ignore):
tests.addTests(doctest.DocTestSuite(config))
return tests
| oozappa/maiden | tests/__init__.py | Python | apache-2.0 | 148 |
"""
This module holds models related to benefits features and configurations
"""
from django import forms
from django.db import models
from django.db.models import UniqueConstraint
from django.urls import reverse
from polymorphic.models import PolymorphicModel
from sponsors.models.assets import ImgAsset, TextAsset, FileAsset, ResponseAsset, Response
from sponsors.models.enums import (
PublisherChoices,
LogoPlacementChoices,
AssetsRelatedTo,
)
from sponsors.models.managers import BenefitFeatureQuerySet
########################################
# Benefit features abstract classes
class BaseLogoPlacement(models.Model):
publisher = models.CharField(
max_length=30,
choices=[(c.value, c.name.replace("_", " ").title()) for c in PublisherChoices],
verbose_name="Publisher",
help_text="On which site should the logo be displayed?"
)
logo_place = models.CharField(
max_length=30,
choices=[(c.value, c.name.replace("_", " ").title()) for c in LogoPlacementChoices],
verbose_name="Logo Placement",
help_text="Where the logo should be placed?"
)
link_to_sponsors_page = models.BooleanField(
default=False,
help_text="Override URL in placement to the PSF Sponsors Page, rather than the sponsor landing page url.",
)
describe_as_sponsor = models.BooleanField(
default=False,
help_text='Override description with "SPONSOR_NAME is a SPONSOR_LEVEL sponsor of the Python Software Foundation".',
)
class Meta:
abstract = True
class BaseTieredQuantity(models.Model):
package = models.ForeignKey("sponsors.SponsorshipPackage", on_delete=models.CASCADE)
quantity = models.PositiveIntegerField()
class Meta:
abstract = True
class BaseEmailTargetable(models.Model):
class Meta:
abstract = True
class BaseAsset(models.Model):
ASSET_CLASS = None
related_to = models.CharField(
max_length=30,
choices=[(c.value, c.name.replace("_", " ").title()) for c in AssetsRelatedTo],
verbose_name="Related To",
help_text="To which instance (Sponsor or Sponsorship) should this asset relate to."
)
internal_name = models.CharField(
max_length=128,
verbose_name="Internal Name",
help_text="Unique name used internally to control if the sponsor/sponsorship already has the asset",
unique=False,
db_index=True,
)
label = models.CharField(
max_length=256,
help_text="What's the title used to display the input to the sponsor?"
)
help_text = models.CharField(
max_length=256,
help_text="Any helper comment on how the input should be populated",
default="",
blank=True
)
class Meta:
abstract = True
class BaseRequiredAsset(BaseAsset):
due_date = models.DateField(default=None, null=True, blank=True)
class Meta:
abstract = True
class BaseProvidedAsset(BaseAsset):
shared = models.BooleanField(
default = False,
)
def shared_value(self):
return None
class Meta:
abstract = True
class AssetConfigurationMixin:
"""
    This class should be used to implement asset configuration.
    It's a mixin that updates the benefit feature creation to also
    create the related asset models.
"""
def create_benefit_feature(self, sponsor_benefit, **kwargs):
if not self.ASSET_CLASS:
raise NotImplementedError(
"Subclasses of AssetConfigurationMixin must define an ASSET_CLASS attribute.")
# Super: BenefitFeatureConfiguration.create_benefit_feature
benefit_feature = super().create_benefit_feature(sponsor_benefit, **kwargs)
content_object = sponsor_benefit.sponsorship
if self.related_to == AssetsRelatedTo.SPONSOR.value:
content_object = sponsor_benefit.sponsorship.sponsor
asset_qs = content_object.assets.filter(internal_name=self.internal_name)
if not asset_qs.exists():
asset = self.ASSET_CLASS(
content_object=content_object, internal_name=self.internal_name,
)
asset.save()
return benefit_feature
class Meta:
abstract = True
class BaseRequiredImgAsset(BaseRequiredAsset):
ASSET_CLASS = ImgAsset
min_width = models.PositiveIntegerField()
max_width = models.PositiveIntegerField()
min_height = models.PositiveIntegerField()
max_height = models.PositiveIntegerField()
class Meta(BaseRequiredAsset.Meta):
abstract = True
class BaseRequiredTextAsset(BaseRequiredAsset):
ASSET_CLASS = TextAsset
label = models.CharField(
max_length=256,
help_text="What's the title used to display the text input to the sponsor?"
)
help_text = models.CharField(
max_length=256,
help_text="Any helper comment on how the input should be populated",
default="",
blank=True
)
max_length = models.IntegerField(
default=None,
help_text="Limit to length of the input, empty means unlimited",
null=True,
blank=True,
)
class Meta(BaseRequiredAsset.Meta):
abstract = True
class BaseRequiredResponseAsset(BaseRequiredAsset):
ASSET_CLASS = ResponseAsset
class Meta(BaseRequiredAsset.Meta):
abstract = True
class BaseProvidedTextAsset(BaseProvidedAsset):
ASSET_CLASS = TextAsset
label = models.CharField(
max_length=256,
help_text="What's the title used to display the text input to the sponsor?"
)
help_text = models.CharField(
max_length=256,
help_text="Any helper comment on how the input should be populated",
default="",
blank=True
)
class Meta(BaseProvidedAsset.Meta):
abstract = True
class BaseProvidedFileAsset(BaseProvidedAsset):
ASSET_CLASS = FileAsset
label = models.CharField(
max_length=256,
help_text="What's the title used to display the file to the sponsor?"
)
help_text = models.CharField(
max_length=256,
help_text="Any helper comment on how the file should be used",
default="",
blank=True
)
shared_file = models.FileField(blank=True, null=True)
def shared_value(self):
return self.shared_file
class Meta(BaseProvidedAsset.Meta):
abstract = True
class AssetMixin:
def __related_asset(self):
"""
This method exists to avoid FK relationships between the GenericAsset
        and required asset objects. This is to decouple the asset setup from the
        real asset value in a way that, if the first gets deleted, the second can
        still be reused.
"""
object = self.sponsor_benefit.sponsorship
if self.related_to == AssetsRelatedTo.SPONSOR.value:
object = self.sponsor_benefit.sponsorship.sponsor
return object.assets.get(internal_name=self.internal_name)
@property
def value(self):
asset = self.__related_asset()
return asset.value
@value.setter
def value(self, value):
asset = self.__related_asset()
asset.value = value
asset.save()
@property
def user_edit_url(self):
url = reverse("users:update_sponsorship_assets", args=[self.sponsor_benefit.sponsorship.pk])
return url + f"?required_asset={self.pk}"
@property
def user_view_url(self):
url = reverse("users:view_provided_sponsorship_assets", args=[self.sponsor_benefit.sponsorship.pk])
return url + f"?provided_asset={self.pk}"
class RequiredAssetMixin(AssetMixin):
"""
This class should be used to implement required assets.
    It's a mixin to get the information submitted by the user,
    which is stored in the related asset class.
"""
pass
class ProvidedAssetMixin(AssetMixin):
"""
This class should be used to implement provided assets.
    It's a mixin to get the information submitted by the staff,
    which is stored in the related asset class.
"""
@AssetMixin.value.getter
def value(self):
if hasattr(self, 'shared') and self.shared:
return self.shared_value()
return super().value
######################################################
# SponsorshipBenefit features configuration models
class BenefitFeatureConfiguration(PolymorphicModel):
"""
Base class for sponsorship benefits configuration.
"""
benefit = models.ForeignKey("sponsors.SponsorshipBenefit", on_delete=models.CASCADE)
class Meta:
verbose_name = "Benefit Feature Configuration"
verbose_name_plural = "Benefit Feature Configurations"
@property
def benefit_feature_class(self):
"""
Return a subclass of BenefitFeature related to this configuration.
Every configuration subclass must implement this property
"""
raise NotImplementedError
def get_benefit_feature_kwargs(self, **kwargs):
"""
Return kwargs dict to initialize the benefit feature.
If the benefit should not be created, return None instead.
"""
# Get all fields from benefit feature configuration base model
base_fields = set(BenefitFeatureConfiguration._meta.get_fields())
# Get only the fields from the abstract base feature model
benefit_fields = set(self._meta.get_fields()) - base_fields
# Configure the related benefit feature using values from the configuration
for field in benefit_fields:
# Skip the OneToOne rel from the base class to BenefitFeatureConfiguration base class
# since this field only exists in child models
if BenefitFeatureConfiguration is getattr(field, 'related_model', None):
continue
kwargs[field.name] = getattr(self, field.name)
return kwargs
def get_benefit_feature(self, **kwargs):
"""
Returns an instance of a configured type of BenefitFeature
"""
BenefitFeatureClass = self.benefit_feature_class
kwargs = self.get_benefit_feature_kwargs(**kwargs)
if kwargs is None:
return None
return BenefitFeatureClass(**kwargs)
def display_modifier(self, name, **kwargs):
return name
def create_benefit_feature(self, sponsor_benefit, **kwargs):
"""
        This method persists a benefit feature from the configuration
"""
feature = self.get_benefit_feature(sponsor_benefit=sponsor_benefit, **kwargs)
if feature is not None:
feature.save()
return feature
class LogoPlacementConfiguration(BaseLogoPlacement, BenefitFeatureConfiguration):
"""
Configuration to control how sponsor logo should be placed
"""
class Meta(BaseLogoPlacement.Meta, BenefitFeatureConfiguration.Meta):
verbose_name = "Logo Placement Configuration"
verbose_name_plural = "Logo Placement Configurations"
@property
def benefit_feature_class(self):
return LogoPlacement
def __str__(self):
return f"Logo Configuration for {self.get_publisher_display()} at {self.get_logo_place_display()}"
class TieredQuantityConfiguration(BaseTieredQuantity, BenefitFeatureConfiguration):
"""
Configuration for tiered quantities among packages
"""
class Meta(BaseTieredQuantity.Meta, BenefitFeatureConfiguration.Meta):
verbose_name = "Tiered Benefit Configuration"
verbose_name_plural = "Tiered Benefit Configurations"
@property
def benefit_feature_class(self):
return TieredQuantity
def get_benefit_feature_kwargs(self, **kwargs):
if kwargs["sponsor_benefit"].sponsorship.package == self.package:
return super().get_benefit_feature_kwargs(**kwargs)
return None
def __str__(self):
return f"Tiered Quantity Configuration for {self.benefit} and {self.package} ({self.quantity})"
def display_modifier(self, name, **kwargs):
if kwargs.get("package") != self.package:
return name
return f"{name} ({self.quantity})"
class EmailTargetableConfiguration(BaseEmailTargetable, BenefitFeatureConfiguration):
"""
    Configuration for email targetable benefits
"""
class Meta(BaseTieredQuantity.Meta, BenefitFeatureConfiguration.Meta):
verbose_name = "Email Targetable Configuration"
verbose_name_plural = "Email Targetable Configurations"
@property
def benefit_feature_class(self):
return EmailTargetable
def __str__(self):
return f"Email targeatable configuration"
class RequiredImgAssetConfiguration(AssetConfigurationMixin, BaseRequiredImgAsset, BenefitFeatureConfiguration):
class Meta(BaseRequiredImgAsset.Meta, BenefitFeatureConfiguration.Meta):
verbose_name = "Require Image Configuration"
verbose_name_plural = "Require Image Configurations"
constraints = [UniqueConstraint(fields=["internal_name"], name="uniq_img_asset_cfg")]
def __str__(self):
return f"Require image configuration"
@property
def benefit_feature_class(self):
return RequiredImgAsset
class RequiredTextAssetConfiguration(AssetConfigurationMixin, BaseRequiredTextAsset,
BenefitFeatureConfiguration):
class Meta(BaseRequiredTextAsset.Meta, BenefitFeatureConfiguration.Meta):
verbose_name = "Require Text Configuration"
verbose_name_plural = "Require Text Configurations"
constraints = [UniqueConstraint(fields=["internal_name"], name="uniq_text_asset_cfg")]
def __str__(self):
return f"Require text configuration"
@property
def benefit_feature_class(self):
return RequiredTextAsset
class RequiredResponseAssetConfiguration(
AssetConfigurationMixin, BaseRequiredResponseAsset, BenefitFeatureConfiguration
):
class Meta(BaseRequiredResponseAsset.Meta, BenefitFeatureConfiguration.Meta):
verbose_name = "Require Response Configuration"
verbose_name_plural = "Require Response Configurations"
constraints = [
UniqueConstraint(fields=["internal_name"], name="uniq_response_asset_cfg")
]
def __str__(self):
return f"Require response configuration"
@property
def benefit_feature_class(self):
return RequiredResponseAsset
class ProvidedTextAssetConfiguration(
AssetConfigurationMixin, BaseProvidedTextAsset, BenefitFeatureConfiguration
):
class Meta(BaseProvidedTextAsset.Meta, BenefitFeatureConfiguration.Meta):
verbose_name = "Provided Text Configuration"
verbose_name_plural = "Provided Text Configurations"
constraints = [UniqueConstraint(fields=["internal_name"], name="uniq_provided_text_asset_cfg")]
def __str__(self):
return f"Provided text configuration"
@property
def benefit_feature_class(self):
return ProvidedTextAsset
class ProvidedFileAssetConfiguration(AssetConfigurationMixin, BaseProvidedFileAsset,
BenefitFeatureConfiguration):
class Meta(BaseProvidedFileAsset.Meta, BenefitFeatureConfiguration.Meta):
verbose_name = "Provided File Configuration"
verbose_name_plural = "Provided File Configurations"
constraints = [UniqueConstraint(fields=["internal_name"], name="uniq_provided_file_asset_cfg")]
def __str__(self):
return f"Provided File configuration"
@property
def benefit_feature_class(self):
return ProvidedFileAsset
####################################
# SponsorBenefit features models
class BenefitFeature(PolymorphicModel):
"""
Base class for sponsor benefits features.
"""
objects = BenefitFeatureQuerySet.as_manager()
sponsor_benefit = models.ForeignKey("sponsors.SponsorBenefit", on_delete=models.CASCADE)
class Meta:
verbose_name = "Benefit Feature"
verbose_name_plural = "Benefit Features"
def display_modifier(self, name, **kwargs):
return name
class LogoPlacement(BaseLogoPlacement, BenefitFeature):
"""
Logo Placement feature for sponsor benefits
"""
class Meta(BaseLogoPlacement.Meta, BenefitFeature.Meta):
verbose_name = "Logo Placement"
verbose_name_plural = "Logo Placement"
def __str__(self):
return f"Logo for {self.get_publisher_display()} at {self.get_logo_place_display()}"
class TieredQuantity(BaseTieredQuantity, BenefitFeature):
"""
Tiered Quantity feature for sponsor benefits
"""
class Meta(BaseTieredQuantity.Meta, BenefitFeature.Meta):
verbose_name = "Tiered Quantity"
verbose_name_plural = "Tiered Quantities"
def display_modifier(self, name, **kwargs):
return f"{name} ({self.quantity})"
def __str__(self):
return f"{self.quantity} of {self.sponsor_benefit} for {self.package}"
class EmailTargetable(BaseEmailTargetable, BenefitFeature):
"""
    For email targetable benefits
"""
class Meta(BaseTieredQuantity.Meta, BenefitFeature.Meta):
verbose_name = "Email Targetable Benefit"
verbose_name_plural = "Email Targetable Benefits"
def __str__(self):
return f"Email targeatable"
class RequiredImgAsset(RequiredAssetMixin, BaseRequiredImgAsset, BenefitFeature):
class Meta(BaseRequiredImgAsset.Meta, BenefitFeature.Meta):
verbose_name = "Require Image"
verbose_name_plural = "Require Images"
def __str__(self):
return f"Require image"
def as_form_field(self, **kwargs):
help_text = kwargs.pop("help_text", self.help_text)
label = kwargs.pop("label", self.label)
required = kwargs.pop("required", False)
return forms.ImageField(required=required, help_text=help_text, label=label, widget=forms.ClearableFileInput, **kwargs)
class RequiredTextAsset(RequiredAssetMixin, BaseRequiredTextAsset, BenefitFeature):
class Meta(BaseRequiredTextAsset.Meta, BenefitFeature.Meta):
verbose_name = "Require Text"
verbose_name_plural = "Require Texts"
def __str__(self):
return f"Require text"
def as_form_field(self, **kwargs):
help_text = kwargs.pop("help_text", self.help_text)
label = kwargs.pop("label", self.label)
required = kwargs.pop("required", False)
max_length = self.max_length
widget = forms.TextInput
if max_length is None or max_length > 256:
widget = forms.Textarea
return forms.CharField(required=required, help_text=help_text, label=label, widget=widget, **kwargs)
class RequiredResponseAsset(RequiredAssetMixin, BaseRequiredResponseAsset, BenefitFeature):
class Meta(BaseRequiredTextAsset.Meta, BenefitFeature.Meta):
verbose_name = "Require Response"
verbose_name_plural = "Required Responses"
def __str__(self):
return f"Require response"
def as_form_field(self, **kwargs):
help_text = kwargs.pop("help_text", self.help_text)
label = kwargs.pop("label", self.label)
required = kwargs.pop("required", False)
return forms.ChoiceField(required=required, choices=Response.choices(), widget=forms.RadioSelect, help_text=help_text, label=label, **kwargs)
class ProvidedTextAsset(ProvidedAssetMixin, BaseProvidedTextAsset, BenefitFeature):
class Meta(BaseProvidedTextAsset.Meta, BenefitFeature.Meta):
verbose_name = "Provided Text"
verbose_name_plural = "Provided Texts"
def __str__(self):
return f"Provided text {self.internal_name}"
class ProvidedFileAsset(ProvidedAssetMixin, BaseProvidedFileAsset, BenefitFeature):
class Meta(BaseProvidedFileAsset.Meta, BenefitFeature.Meta):
verbose_name = "Provided File"
verbose_name_plural = "Provided Files"
def __str__(self):
return f"Provided file"
| manhhomienbienthuy/pythondotorg | sponsors/models/benefits.py | Python | apache-2.0 | 20,095 |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# quicksorts.py (C) myke, 2015
# 2015-11-08 1.1
# various versions of the quicksort algo
import random
TIMES = 10
SIZE = 10
RANGE = 10
# -----------------------------------------------
def qs1 (al):
""" Algo quicksort for a list
"""
if not al:
return []
return (qs1([x for x in al if x < al[0]])
+ [x for x in al if x == al[0]]
+ qs1([x for x in al if x > al[0]]))
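# Illustrative example (not in the original script):
#   qs1([3, 1, 2, 1]) -> [1, 1, 2, 3]; qs2 returns the same result.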
# -----------------------------------------------
def qs2 (array):
""" another longer version"""
less = []
equal = []
greater = []
if len(array) > 1:
pivot = array[0]
for x in array:
if x < pivot:
less.append(x)
if x == pivot:
equal.append(x)
if x > pivot:
greater.append(x)
return qs2(less)+equal+qs2(greater)
else:
return array
# -----------------------------------------------
qs = qs1
# -----------------------------------------------
def main ():
""" dispatcher: tests make and sort """
for i in range(TIMES):
sa = [random.randint(1, RANGE) for e in range(SIZE)]
print (sa, "-->", qs (sa))
main()
# -----------------------------------------------
# used: http://stackoverflow.com/questions/18262306/quick-sort-with-python
| mykespb/pythoner | quicksorts.py | Python | apache-2.0 | 1,354 |
# Copyright 2014 Baidu, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file
# except in compliance with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed under the
# License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
# either express or implied. See the License for the specific language governing permissions
# and limitations under the License.
"""
This module provides general http handler functions for processing http responses from TSDB services.
"""
import http.client
import json
from baidubce import utils
from baidubce.exception import BceClientError
from baidubce.exception import BceServerError
from baidubce.utils import Expando
def parse_json(http_response, response):
"""If the body is not empty, convert it to a python object and set as the value of
response.body. http_response is always closed if no error occurs.
:param http_response: the http_response object returned by HTTPConnection.getresponse()
:type http_response: httplib.HTTPResponse
:param response: general response object which will be returned to the caller
:type response: baidubce.BceResponse
:return: always true
:rtype bool
"""
body = http_response.read()
if body:
response.__dict__.update(json.loads(body, object_hook=dict_to_python_object).__dict__)
http_response.close()
return True
def dict_to_python_object(d):
"""
:param d:
:return:
"""
attr = {}
for k, v in list(d.items()):
k = str(k)
attr[k] = v
return Expando(attr) | baidubce/bce-sdk-python | baidubce/services/tsdb/tsdb_handler.py | Python | apache-2.0 | 1,744 |
import cPickle
point_table = {}
point_table[( 4, 4)] = 400.
point_table[( 3, 4)] = 270.
point_table[( 2, 4)] = 170.
point_table[( 1, 4)] = 100.
point_table[( 0, 4)] = 0.
point_table[(-1, 4)] = 0.
point_table[(-2, 4)] = 0.
point_table[(-3, 4)] = 0.
point_table[(-4, 4)] = 0.
point_table[( 4, 3)] = 240.
point_table[( 3, 3)] = 300.
point_table[( 2, 3)] = 200.
point_table[( 1, 3)] = 120.
point_table[( 0, 3)] = 0.
point_table[(-1, 3)] = 0.
point_table[(-2, 3)] = 0.
point_table[(-3, 3)] = 0.
point_table[(-4, 3)] = 0.
point_table[( 4, 2)] = 140.
point_table[( 3, 2)] = 180.
point_table[( 2, 2)] = 240.
point_table[( 1, 2)] = 160.
point_table[( 0, 2)] = 10.
point_table[(-1, 2)] = 0.
point_table[(-2, 2)] = 0.
point_table[(-3, 2)] = 0.
point_table[(-4, 2)] = 0.
point_table[( 4, 1)] = 100.
point_table[( 3, 1)] = 110.
point_table[( 2, 1)] = 150.
point_table[( 1, 1)] = 200.
point_table[( 0, 1)] = 40.
point_table[(-1, 1)] = 0.
point_table[(-2, 1)] = 0.
point_table[(-3, 1)] = 0.
point_table[(-4, 1)] = 0.
point_table[( 4, 0)] = 0.
point_table[( 3, 0)] = 0.
point_table[( 2, 0)] = 10.
point_table[( 1, 0)] = 20.
point_table[( 0, 0)] = 160.
point_table[(-1, 0)] = 20.
point_table[(-2, 0)] = 10.
point_table[(-3, 0)] = 0.
point_table[(-4, 0)] = 0.
point_table[( 4,-1)] = 0.
point_table[( 3,-1)] = 0.
point_table[( 2,-1)] = 0.
point_table[( 1,-1)] = 0.
point_table[( 0,-1)] = 40.
point_table[(-1,-1)] = 200.
point_table[(-2,-1)] = 150.
point_table[(-3,-1)] = 110.
point_table[(-4,-1)] = 100.
point_table[( 4,-2)] = 0.
point_table[( 3,-2)] = 0.
point_table[( 2,-2)] = 0.
point_table[( 1,-2)] = 0.
point_table[( 0,-2)] = 10.
point_table[(-1,-2)] = 160.
point_table[(-2,-2)] = 240.
point_table[(-3,-2)] = 180.
point_table[(-4,-2)] = 140.
point_table[( 4,-3)] = 0.
point_table[( 3,-3)] = 0.
point_table[( 2,-3)] = 0.
point_table[( 1,-3)] = 0.
point_table[( 0,-3)] = 0.
point_table[(-1,-3)] = 120.
point_table[(-2,-3)] = 200.
point_table[(-3,-3)] = 300.
point_table[(-4,-3)] = 240.
point_table[( 4,-4)] = 0.
point_table[( 3,-4)] = 0.
point_table[( 2,-4)] = 0.
point_table[( 1,-4)] = 0.
point_table[( 0,-4)] = 0.
point_table[(-1,-4)] = 100.
point_table[(-2,-4)] = 170.
point_table[(-3,-4)] = 270.
point_table[(-4,-4)] = 400.
cPickle.dump(point_table, open('../data/point_table.cpickle', 'wb'))
| lshuo/WC2014_bet | src/set_zhihu_point_table.py | Python | apache-2.0 | 2,291 |
import sys
import os
import subprocess
import filter_remapped_reads
import util
#
# filter_remapped_reads.py
# INPUT FILES:
# to_remap_bam - input BAM file containing original set of reads
# that need to be remapped after having their alleles flipped
#
# remap_bam - input BAM file containing remapped reads. Read names in this
# file should be delimited with the '.' character and
# contain the following fields:
# <orig_name>.<coordinate>.<read_number>.<total_read_number>
#
# For single-end reads <coordinate> is the left end of the read
# (e.g. 16052611)
# For paired-end reads the coordinate is the start of the
# the left read and start of the right read:
# (e.g. 16052611-16052734)
#
#
#
# OUTPUT FILES:
#  keep_bam - output BAM file containing reads that are retained
# after filtering
#
#
#
# TODO: need to verify that interleaved read pairs are handled appropriately
# TODO: need to test single end reads
#
#
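# A minimal sketch (illustration only, not code used by the tests below) of how a
# remapped read name could be taken apart; the original name may itself contain
# '.', so the three generated fields are split off from the right:
#   name = "SRR1658224.34085432.16052611-16052734.1.2"
#   orig_name, coordinate, read_num, total = name.rsplit('.', 3)
#   -> ("SRR1658224.34085432", "16052611-16052734", "1", "2")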
def write_sam_header(f):
f.write("@HD VN:1.0 SO:coordinate\n")
f.write("@SQ SN:chr22 LN:51304566\n")
f.write('@PG ID:bowtie2 PN:bowtie2 VN:2.2.6 CL:"/iblm/netapp/home/gmcvicker/anaconda2/bin/bowtie2-align-s --wrapper basic-0 -x /iblm/netapp/data1/external/GRC37/combined/bowtie2_index/hg37 -1 /tmp/16686.inpipe1 -2 /tmp/16686.inpipe2\n')
def write_to_remap_bam_pe(data_dir="test_data", bam_filename="test_data/test.to.remap.bam"):
sam_lines = ["SRR1658224.34085432 163 chr22 16052611 12 101M = 16052734 224 TGGAGACATAAAATGAGGCATATCTGACCTCCACTTCCAAAAACATCTGAGATAGGTCTCAGTTAATTAAGAAAGTTTGTTCTGCCTAGTTTAAGGACATG CCCFFFFFHHHHHJJJJJJJJJJJJJJJIJJJJJJJJJJJJJJJJJIJJIHIJJJJEHIJJJHJJJJJJJJJJJJ=DHHHHHFFFFFFEEEEEEDDCDDDC AS:i:-11 XS:i:-17 XN:i:0 XM:i:2 XO:i:0 XG:i:0 NM:i:2 MD:Z:7G44C48 YS:i:0 YT:Z:CP",
"SRR1658224.34085432 83 chr22 16052734 12 101M = 16052611 -224 TCCTGACAGCATGTGCCCAAGGTGGTCAGGATACAGCTTGCTTCTATATATTTTAGGGAGAAAATACATCAGCCTGTAAACAAAAAATTAAATTCTAAGGT DDDDDDDDDDDDDDEDEEEFFFFHHFHHIIFIIJJJJIJJJJJJJJJJIIJJJIIIJIJIJJJJIFIIIJJIJJJJJJJIIJJJJJJJHHHHHFFFFFCCC AS:i:0 XS:i:-12 XN:i:0 XM:i:0 XO:i:0 XG:i:0 NM:i:0 MD:Z:101 YS:i:-11 YT:Z:CP",
"SRR1658224.34975561 99 chr22 16071944 12 101M = 16072163 320 ATTTATTTATTTATTTATTATTGGGACAGAGTCTCACTCTGTCCCCCAGACTGGAGTCCAGTGACATGATCTCAGCTCACTGCAACCTCTGCCTCGTGGGT CCCFFFFFHHHHHJJJJJJJJJJJJIJJJJIEHIJJJJJJJIIJJJJJIJJJJJJJJJJIJHIJIJJJJIJJJJJHHHHHHFFFFFECEEEEDDDDDDBBD AS:i:-5 XS:i:-22 XN:i:0 XM:i:1 XO:i:0 XG:i:0 NM:i:1 MD:Z:89C11 YS:i:0 YT:Z:CP",
"SRR1658224.34975561 147 chr22 16072163 12 101M = 16071944 -320 GTCTCAAACTTCTGACCTCAGGTGATCCACCCACCTCGACCTCCCAAAGTGCTGGGATTACAGGCACTAGGTCCCTAAATTAGAGCCATATTCTTTAATGT DDBCDEDCDCCDCC?DDDDDDDBACBDA<FFB:6HIIJIIJIIJJJJJJJJJJJJIJJIHJJJJJIJJJJJJJJJJJJJJJJJJJJJJHHHGGFFFFFCCC AS:i:0 XS:i:0 XN:i:0 XM:i:0 XO:i:0 XG:i:0 NM:i:0 MD:Z:101 YS:i:-5 YT:Z:CP",
"SRR1658224.7462188 163 chr22 16235410 17 101M = 16235625 316 AGATAATTGTCTTATTTTTTTAAAAAAAGAGTAACTTTATATTATGGAATTCATAATATTTGAGACTATAATGCATGACATAAATAGTATAAAGGAGAGAG CC@FFFFFHHHHHJJJJJJJJJJJJJJJJIJBGIJJJJJJJJJJJJJIJIFIJJJJJJJJJHHHHGFFFFFFEEEEDEEDDDDDEED@CFFFEDDD?ABB? AS:i:0 XS:i:0 XN:i:0 XM:i:0 XO:i:0 XG:i:0 NM:i:0 MD:Z:101 YS:i:-5 YT:Z:CP",
"SRR1658224.7462188 83 chr22 16235625 17 101M = 16235410 -316 TTCAAAAGATGGTATATGCATTAATATTTTCATACAACTTCCAGCTTTTGTTTTTCTTCATTTAATTTTATTTATTTATTTATTTTTGAGATGGAGTCTCG CBDDDDECEEDEFFFDFFFHHHHHHHJJIIJJIHIHFHGHJJJJJJJGJJJJJIJJJIIJJJJJJJJJJJJJJJJJJJJJJJJJJJJJHHHHHFFFDFCCC AS:i:-5 XS:i:-39 XN:i:0 XM:i:1 XO:i:0 XG:i:0 NM:i:1 MD:Z:15G85 YS:i:0 YT:Z:CP",
"SRR1658224.31153145 163 chr22 16235410 17 101M = 16235625 316 AGATAATTGTCTTATTTTTTTAAAAAAAGAGTAACTTTATATTATGGAATTCATAATATTTGAGACTATAATGCATGACATAAATAGTATAAAGGAGAGAG CCCFFFFFHHHHHJJJJJJJJJJJJJJJJIJFHIJJJJJJJJJJJIJIJJFHIJJJJJJJJHHHHHFFFFFFEDEEEEEDDDDDEED@DEEEEDDDDDDB2 AS:i:0 XS:i:0 XN:i:0 XM:i:0 XO:i:0 XG:i:0 NM:i:0 MD:Z:101 YS:i:-2 YT:Z:CP",
"SRR1658224.31153145 83 chr22 16235625 17 101M = 16235410 -316 TTCAAAAGATGGTATGTGCATTAATATTTTCATACAACTTCCAGTTTTTGTTTTTCTTCATTTAATTTTATTTATTTATTTATTTTTGAGATGGAGTCTCG DDDDDDDDEEEEEEFFFFFFHHHHGHHJJIJJJIIJIJIHJHF@(JJJJJJJJJJJJIIIIJJJJJJJIJJJJJJJJJJJJJJJJJJJHHHHHFFFDFCCC AS:i:-2 XS:i:-36 XN:i:0 XM:i:1 XO:i:0 XG:i:0 NM:i:1 MD:Z:44C56 YS:i:0 YT:Z:CP",
"SRR1658224.25014179 163 chr22 16236979 31 101M = 16237137 259 ATGTTTTTTAAGATTTAATATTACTTTTTCCAACATCTTTTTATCCTCAAGTTTTTTATATTCCTGTTGTATTTTTTTATAGATAATAACTCCTGTTGAAT CCCFFFFFHHHHFIJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJHGIJJJJJJJJIJJJJJJJHHHHHHHDCDDECDEEDDEDDDDDDDDDDCDC AS:i:0 XS:i:-28 XN:i:0 XM:i:0 XO:i:0 XG:i:0 NM:i:0 MD:Z:101 YS:i:0 YT:Z:CP",
"SRR1658224.25014179 83 chr22 16237137 31 101M = 16236979 -259 TCATCGAACTACATTAATAAAATAATATAGCTTGATAATGAAGTAGGCTGAGAATAATCTCATACAAAACCAATAACAAATTTTGAAATACATTTACTTGC CEFFFFFHHHHHHHHJJJJJJJJJIHJIJIJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJIIJJJIHJJJJJJIJJJJJJJJJJJJHHHHHFDDFFCCC AS:i:0 XS:i:0 XN:i:0 XM:i:0 XO:i:0 XG:i:0 NM:i:0 MD:Z:101 YS:i:0 YT:Z:CP",
"readpair1 163 chr22 100 12 101M = 200 201 TGGAGACATAAAATGAGGCATATCTGACCTCCACTTCCAAAAACATCTGAGATAGGTCTCAGTTAATTAAGAAAGTTTGTTCTGCCTAGTTTAAGGACATG CCCFFFFFHHHHHJJJJJJJJJJJJJJJIJJJJJJJJJJJJJJJJJIJJIHIJJJJEHIJJJHJJJJJJJJJJJJ=DHHHHHFFFFFFEEEEEEDDCDDDC AS:i:-11 XS:i:-17 XN:i:0 XM:i:2 XO:i:0 XG:i:0 NM:i:2 MD:Z:7G44C48 YS:i:0 YT:Z:CP",
"readpair2 163 chr22 150 12 101M = 250 201 TGGAGACATAAAATGAGGCATATCTGACCTCCACTTCCAAAAACATCTGAGATAGGTCTCAGTTAATTAAGAAAGTTTGTTCTGCCTAGTTTAAGGACATG CCCFFFFFHHHHHJJJJJJJJJJJJJJJIJJJJJJJJJJJJJJJJJIJJIHIJJJJEHIJJJHJJJJJJJJJJJJ=DHHHHHFFFFFFEEEEEEDDCDDDC AS:i:-11 XS:i:-17 XN:i:0 XM:i:2 XO:i:0 XG:i:0 NM:i:2 MD:Z:7G44C48 YS:i:0 YT:Z:CP",
"readpair1 83 chr22 200 12 101M = 100 -201 TCCTGACAGCATGTGCCCAAGGTGGTCAGGATACAGCTTGCTTCTATATATTTTAGGGAGAAAATACATCAGCCTGTAAACAAAAAATTAAATTCTAAGGT DDDDDDDDDDDDDDEDEEEFFFFHHFHHIIFIIJJJJIJJJJJJJJJJIIJJJIIIJIJIJJJJIFIIIJJIJJJJJJJIIJJJJJJJHHHHHFFFFFCCC AS:i:0 XS:i:-12 XN:i:0 XM:i:0 XO:i:0 XG:i:0 NM:i:0 MD:Z:101 YS:i:-11 YT:Z:CP",
"readpair2 163 chr22 250 12 101M = 150 -201 TGGAGACATAAAATGAGGCATATCTGACCTCCACTTCCAAAAACATCTGAGATAGGTCTCAGTTAATTAAGAAAGTTTGTTCTGCCTAGTTTAAGGACATG CCCFFFFFHHHHHJJJJJJJJJJJJJJJIJJJJJJJJJJJJJJJJJIJJIHIJJJJEHIJJJHJJJJJJJJJJJJ=DHHHHHFFFFFFEEEEEEDDCDDDC AS:i:-11 XS:i:-17 XN:i:0 XM:i:2 XO:i:0 XG:i:0 NM:i:2 MD:Z:7G44C48 YS:i:0 YT:Z:CP"]
if not os.path.exists(data_dir):
os.makedirs(data_dir)
# write temporary file in SAM format, before converting to BAM
sam_filename = data_dir + "/tmp.sam"
f = open(sam_filename, "w")
write_sam_header(f)
for line in sam_lines:
f.write(line + "\n")
f.close()
subprocess.check_call("samtools view -b %s > %s" % (sam_filename, bam_filename), shell=True)
def write_remap_bam_pe(data_dir="test_data", bam_filename="test_data/test.remap.bam"):
sam_lines = [
# Read pair expected to map 2 times and maps to correct location 2 times
"SRR1658224.34085432.16052611-16052734.1.2 163 chr22 16052611 12 101M = 16052734 224 TGGAGACATAAAATGAGGCATATCTGACCTCCACTTCCAAAAACATCTGAGATAGGTCTCAGTTAATTAAGAAAGTTTGTTCTGCCTAGTTTAAGGACATG CCCFFFFFHHHHHJJJJJJJJJJJJJJJIJJJJJJJJJJJJJJJJJIJJIHIJJJJEHIJJJHJJJJJJJJJJJJ=DHHHHHFFFFFFEEEEEEDDCDDDC AS:i:-11 XS:i:-17 XN:i:0 XM:i:2 XO:i:0 XG:i:0 NM:i:2 MD:Z:7G44C48 YS:i:0 YT:Z:CP",
"SRR1658224.34085432.16052611-16052734.1.2 83 chr22 16052734 12 101M = 16052611 -224 TCCTGACAGCATGTGCCCAAGGTGGTCAGGATACAGCTTGCTTCTATATATTTTAGGGAGAAAATACATCAGCCTGTAAACAAAAAATTAAATTCTAAGGT DDDDDDDDDDDDDDEDEEEFFFFHHFHHIIFIIJJJJIJJJJJJJJJJIIJJJIIIJIJIJJJJIFIIIJJIJJJJJJJIIJJJJJJJHHHHHFFFFFCCC AS:i:0 XS:i:-12 XN:i:0 XM:i:0 XO:i:0 XG:i:0 NM:i:0 MD:Z:101 YS:i:-11 YT:Z:CP",
"SRR1658224.34085432.16052611-16052734.2.2 163 chr22 16052611 12 101M = 16052734 224 TGGAGACATAAAATGAGGCATATCTGACCTCCACTTCCAAAAACATCTGAGATAGGTCTCAGTTAATTAAGAAAGTTTGTTCTGCCTAGTTTAAGGACATG CCCFFFFFHHHHHJJJJJJJJJJJJJJJIJJJJJJJJJJJJJJJJJIJJIHIJJJJEHIJJJHJJJJJJJJJJJJ=DHHHHHFFFFFFEEEEEEDDCDDDC AS:i:-11 XS:i:-17 XN:i:0 XM:i:2 XO:i:0 XG:i:0 NM:i:2 MD:Z:7G44C48 YS:i:0 YT:Z:CP",
"SRR1658224.34085432.16052611-16052734.2.2 83 chr22 16052734 12 101M = 16052611 -224 TCCTGACAGCATGTGCCCAAGGTGGTCAGGATACAGCTTGCTTCTATATATTTTAGGGAGAAAATACATCAGCCTGTAAACAAAAAATTAAATTCTAAGGT DDDDDDDDDDDDDDEDEEEFFFFHHFHHIIFIIJJJJIJJJJJJJJJJIIJJJIIIJIJIJJJJIFIIIJJIJJJJJJJIIJJJJJJJHHHHHFFFFFCCC AS:i:0 XS:i:-12 XN:i:0 XM:i:0 XO:i:0 XG:i:0 NM:i:0 MD:Z:101 YS:i:-11 YT:Z:CP",
# Read pair expected to map 2 times, but only maps 1 time
"SRR1658224.34975561.16071944-16072163.2.2 99 chr22 16071944 12 101M = 16072163 320 ATTTATTTATTTATTTATTATTGGGACAGAGTCTCACTCTGTCCCCCAGACTGGAGTCCAGTGACATGATCTCAGCTCACTGCAACCTCTGCCTCGTGGGT CCCFFFFFHHHHHJJJJJJJJJJJJIJJJJIEHIJJJJJJJIIJJJJJIJJJJJJJJJJIJHIJIJJJJIJJJJJHHHHHHFFFFFECEEEEDDDDDDBBD AS:i:-5 XS:i:-22 XN:i:0 XM:i:1 XO:i:0 XG:i:0 NM:i:1 MD:Z:89C11 YS:i:0 YT:Z:CP",
"SRR1658224.34975561.16071944-16072163.2.2 147 chr22 16072163 12 101M = 16071944 -320 GTCTCAAACTTCTGACCTCAGGTGATCCACCCACCTCGACCTCCCAAAGTGCTGGGATTACAGGCACTAGGTCCCTAAATTAGAGCCATATTCTTTAATGT DDBCDEDCDCCDCC?DDDDDDDBACBDA<FFB:6HIIJIIJIIJJJJJJJJJJJJIJJIHJJJJJIJJJJJJJJJJJJJJJJJJJJJJHHHGGFFFFFCCC AS:i:0 XS:i:0 XN:i:0 XM:i:0 XO:i:0 XG:i:0 NM:i:0 MD:Z:101 YS:i:-5 YT:Z:CP",
# Read pair expected to map 2 times, but only 1/2 of 2nd pair maps back to same location
"SRR1658224.7462188.16235410-16235625.1.2 163 chr22 16235410 17 101M = 16235625 316 AGATAATTGTCTTATTTTTTTAAAAAAAGAGTAACTTTATATTATGGAATTCATAATATTTGAGACTATAATGCATGACATAAATAGTATAAAGGAGAGAG CC@FFFFFHHHHHJJJJJJJJJJJJJJJJIJBGIJJJJJJJJJJJJJIJIFIJJJJJJJJJHHHHGFFFFFFEEEEDEEDDDDDEED@CFFFEDDD?ABB? AS:i:0 XS:i:0 XN:i:0 XM:i:0 XO:i:0 XG:i:0 NM:i:0 MD:Z:101 YS:i:-5 YT:Z:CP",
"SRR1658224.7462188.16235410-16235625.1.2 83 chr22 16235625 17 101M = 16235410 -316 TTCAAAAGATGGTATATGCATTAATATTTTCATACAACTTCCAGCTTTTGTTTTTCTTCATTTAATTTTATTTATTTATTTATTTTTGAGATGGAGTCTCG CBDDDDECEEDEFFFDFFFHHHHHHHJJIIJJIHIHFHGHJJJJJJJGJJJJJIJJJIIJJJJJJJJJJJJJJJJJJJJJJJJJJJJJHHHHHFFFDFCCC AS:i:-5 XS:i:-39 XN:i:0 XM:i:1 XO:i:0 XG:i:0 NM:i:1 MD:Z:15G85 YS:i:0 YT:Z:CP",
"SRR1658224.7462188.16235410-16235625.2.2 163 chr22 16235410 17 101M * 0 0 AGATAATTGTCTTATTTTTTTAAAAAAAGAGTAACTTTATATTATGGAATTCATAATATTTGAGACTATAATGCATGACATAAATAGTATAAAGGAGAGAG CC@FFFFFHHHHHJJJJJJJJJJJJJJJJIJBGIJJJJJJJJJJJJJIJIFIJJJJJJJJJHHHHGFFFFFFEEEEDEEDDDDDEED@CFFFEDDD?ABB? AS:i:0 XS:i:0 XN:i:0 XM:i:0 XO:i:0 XG:i:0 NM:i:0 MD:Z:101 YS:i:-5 YT:Z:CP",
# Read pair expected to map 2 times, but 1 pair maps to wrong location
"SRR1658224.31153145.16235410-16235625.1.2 163 chr22 16235410 17 101M = 16235625 316 AGATAATTGTCTTATTTTTTTAAAAAAAGAGTAACTTTATATTATGGAATTCATAATATTTGAGACTATAATGCATGACATAAATAGTATAAAGGAGAGAG CCCFFFFFHHHHHJJJJJJJJJJJJJJJJIJFHIJJJJJJJJJJJIJIJJFHIJJJJJJJJHHHHHFFFFFFEDEEEEEDDDDDEED@DEEEEDDDDDDB2 AS:i:0 XS:i:0 XN:i:0 XM:i:0 XO:i:0 XG:i:0 NM:i:0 MD:Z:101 YS:i:-2 YT:Z:CP",
"SRR1658224.31153145.16235410-16235625.1.2 83 chr22 16235625 17 101M = 16235410 -316 TTCAAAAGATGGTATGTGCATTAATATTTTCATACAACTTCCAGTTTTTGTTTTTCTTCATTTAATTTTATTTATTTATTTATTTTTGAGATGGAGTCTCG DDDDDDDDEEEEEEFFFFFFHHHHGHHJJIJJJIIJIJIHJHF@(JJJJJJJJJJJJIIIIJJJJJJJIJJJJJJJJJJJJJJJJJJJHHHHHFFFDFCCC AS:i:-2 XS:i:-36 XN:i:0 XM:i:1 XO:i:0 XG:i:0 NM:i:1 MD:Z:44C56 YS:i:0 YT:Z:CP",
"SRR1658224.31153145.16235410-16235625.2.2 163 chr22 18235410 17 101M = 16235625 316 AGATAATTGTCTTATTTTTTTAAAAAAAGAGTAACTTTATATTATGGAATTCATAATATTTGAGACTATAATGCATGACATAAATAGTATAAAGGAGAGAG CCCFFFFFHHHHHJJJJJJJJJJJJJJJJIJFHIJJJJJJJJJJJIJIJJFHIJJJJJJJJHHHHHFFFFFFEDEEEEEDDDDDEED@DEEEEDDDDDDB2 AS:i:0 XS:i:0 XN:i:0 XM:i:0 XO:i:0 XG:i:0 NM:i:0 MD:Z:101 YS:i:-2 YT:Z:CP",
"SRR1658224.31153145.16235410-16235625.2.2 83 chr22 18235625 17 101M = 16235410 -316 TTCAAAAGATGGTATGTGCATTAATATTTTCATACAACTTCCAGTTTTTGTTTTTCTTCATTTAATTTTATTTATTTATTTATTTTTGAGATGGAGTCTCG DDDDDDDDEEEEEEFFFFFFHHHHGHHJJIJJJIIJIJIHJHF@(JJJJJJJJJJJJIIIIJJJJJJJIJJJJJJJJJJJJJJJJJJJHHHHHFFFDFCCC AS:i:-2 XS:i:-36 XN:i:0 XM:i:1 XO:i:0 XG:i:0 NM:i:1 MD:Z:44C56 YS:i:0 YT:Z:CP",
# Read pair expected to map 2 times, but does not map at all
# "SRR1658224.25014179"
        # Read pairs expected to map 1 time each, with the read pairs interleaved
"readpair1.100-200.1.2 163 chr22 100 12 101M = 200 201 TGGAGACATAAAATGAGGCATATCTGACCTCCACTTCCAAAAACATCTGAGATAGGTCTCAGTTAATTAAGAAAGTTTGTTCTGCCTAGTTTAAGGACATG CCCFFFFFHHHHHJJJJJJJJJJJJJJJIJJJJJJJJJJJJJJJJJIJJIHIJJJJEHIJJJHJJJJJJJJJJJJ=DHHHHHFFFFFFEEEEEEDDCDDDC AS:i:-11 XS:i:-17 XN:i:0 XM:i:2 XO:i:0 XG:i:0 NM:i:2 MD:Z:7G44C48 YS:i:0 YT:Z:CP",
"readpair2.150-250.1.2 163 chr22 150 12 101M = 250 201 TGGAGACATAAAATGAGGCATATCTGACCTCCACTTCCAAAAACATCTGAGATAGGTCTCAGTTAATTAAGAAAGTTTGTTCTGCCTAGTTTAAGGACATG CCCFFFFFHHHHHJJJJJJJJJJJJJJJIJJJJJJJJJJJJJJJJJIJJIHIJJJJEHIJJJHJJJJJJJJJJJJ=DHHHHHFFFFFFEEEEEEDDCDDDC AS:i:-11 XS:i:-17 XN:i:0 XM:i:2 XO:i:0 XG:i:0 NM:i:2 MD:Z:7G44C48 YS:i:0 YT:Z:CP",
"readpair1.100-200.1.2 83 chr22 200 12 101M = 100 -201 TCCTGACAGCATGTGCCCAAGGTGGTCAGGATACAGCTTGCTTCTATATATTTTAGGGAGAAAATACATCAGCCTGTAAACAAAAAATTAAATTCTAAGGT DDDDDDDDDDDDDDEDEEEFFFFHHFHHIIFIIJJJJIJJJJJJJJJJIIJJJIIIJIJIJJJJIFIIIJJIJJJJJJJIIJJJJJJJHHHHHFFFFFCCC AS:i:0 XS:i:-12 XN:i:0 XM:i:0 XO:i:0 XG:i:0 NM:i:0 MD:Z:101 YS:i:-11 YT:Z:CP",
"readpair2.150-250.1.2 163 chr22 250 12 101M = 150 -201 TGGAGACATAAAATGAGGCATATCTGACCTCCACTTCCAAAAACATCTGAGATAGGTCTCAGTTAATTAAGAAAGTTTGTTCTGCCTAGTTTAAGGACATG CCCFFFFFHHHHHJJJJJJJJJJJJJJJIJJJJJJJJJJJJJJJJJIJJIHIJJJJEHIJJJHJJJJJJJJJJJJ=DHHHHHFFFFFFEEEEEEDDCDDDC AS:i:-11 XS:i:-17 XN:i:0 XM:i:2 XO:i:0 XG:i:0 NM:i:2 MD:Z:7G44C48 YS:i:0 YT:Z:CP",
"readpair1.100-200.2.2 163 chr22 100 12 101M = 200 201 TGGAGACATAAAATGAGGCATATCTGACCTCCACTTCCAAAAACATCTGAGATAGGTCTCAGTTAATTAAGAAAGTTTGTTCTGCCTAGTTTAAGGACATG CCCFFFFFHHHHHJJJJJJJJJJJJJJJIJJJJJJJJJJJJJJJJJIJJIHIJJJJEHIJJJHJJJJJJJJJJJJ=DHHHHHFFFFFFEEEEEEDDCDDDC AS:i:-11 XS:i:-17 XN:i:0 XM:i:2 XO:i:0 XG:i:0 NM:i:2 MD:Z:7G44C48 YS:i:0 YT:Z:CP",
"readpair2.150-250.2.2 163 chr22 150 12 101M = 250 201 TGGAGACATAAAATGAGGCATATCTGACCTCCACTTCCAAAAACATCTGAGATAGGTCTCAGTTAATTAAGAAAGTTTGTTCTGCCTAGTTTAAGGACATG CCCFFFFFHHHHHJJJJJJJJJJJJJJJIJJJJJJJJJJJJJJJJJIJJIHIJJJJEHIJJJHJJJJJJJJJJJJ=DHHHHHFFFFFFEEEEEEDDCDDDC AS:i:-11 XS:i:-17 XN:i:0 XM:i:2 XO:i:0 XG:i:0 NM:i:2 MD:Z:7G44C48 YS:i:0 YT:Z:CP",
"readpair1.100-200.2.2 83 chr22 200 12 101M = 100 -201 TCCTGACAGCATGTGCCCAAGGTGGTCAGGATACAGCTTGCTTCTATATATTTTAGGGAGAAAATACATCAGCCTGTAAACAAAAAATTAAATTCTAAGGT DDDDDDDDDDDDDDEDEEEFFFFHHFHHIIFIIJJJJIJJJJJJJJJJIIJJJIIIJIJIJJJJIFIIIJJIJJJJJJJIIJJJJJJJHHHHHFFFFFCCC AS:i:0 XS:i:-12 XN:i:0 XM:i:0 XO:i:0 XG:i:0 NM:i:0 MD:Z:101 YS:i:-11 YT:Z:CP",
"readpair2.150-250.2.2 163 chr22 250 12 101M = 150 -201 TGGAGACATAAAATGAGGCATATCTGACCTCCACTTCCAAAAACATCTGAGATAGGTCTCAGTTAATTAAGAAAGTTTGTTCTGCCTAGTTTAAGGACATG CCCFFFFFHHHHHJJJJJJJJJJJJJJJIJJJJJJJJJJJJJJJJJIJJIHIJJJJEHIJJJHJJJJJJJJJJJJ=DHHHHHFFFFFFEEEEEEDDCDDDC AS:i:-11 XS:i:-17 XN:i:0 XM:i:2 XO:i:0 XG:i:0 NM:i:2 MD:Z:7G44C48 YS:i:0 YT:Z:CP"
]
if not os.path.exists(data_dir):
os.makedirs(data_dir)
# write temporary file in SAM format, before converting to BAM
sam_filename = data_dir + "/tmp.sam"
f = open(sam_filename, "w")
write_sam_header(f)
for line in sam_lines:
f.write(line + "\n")
f.close()
# write to temp bam file
tmp_bam_filename = data_dir + "/tmp.bam"
subprocess.check_call("samtools view -b %s > %s" % (sam_filename, tmp_bam_filename), shell=True)
# sort the temp bam file
util.sort_bam(tmp_bam_filename, data_dir + "/tmp")
# remove temp bam
os.remove(tmp_bam_filename)
# rename sorted bam to output bam filename
os.rename(data_dir + "/tmp.sort.bam", bam_filename)
def read_bam(bam):
"""
Read a bam file into a list where each element of the list is a line from
the bam file (with the newline stripped). The header is discarded.
"""
res = subprocess.check_output('samtools view %s' % bam, shell=True)
return res.strip().split('\n')
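# Example (illustrative): each element returned by read_bam is one tab-delimited
# SAM record, so read_bam("test_data/keep.bam")[0].split()[3] would be the
# leftmost mapping position (as a string) of the first kept read.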
def test_filter_remapped_reads_pe():
test_dir = "test_data"
to_remap_bam_filename = "test_data/test.to.remap.bam"
remap_bam_filename = "test_data/test.remap.bam"
keep_bam_filename = "test_data/keep.bam"
# write test input data
write_to_remap_bam_pe(data_dir=test_dir, bam_filename=to_remap_bam_filename)
write_remap_bam_pe(data_dir=test_dir, bam_filename=remap_bam_filename)
# run filter remapped reads
filter_remapped_reads.main(to_remap_bam_filename, remap_bam_filename, keep_bam_filename)
# read in filtered reads
lines = read_bam(keep_bam_filename)
    # group the lines from the keep BAM file by read name
read_dict = {}
for line in lines:
words = line.split()
read_name = words[0]
if read_name in read_dict:
read_dict[read_name].append(words)
else:
read_dict[read_name] = [words]
# verify that filtered reads look correct
# we expect a read pair with this identifier:
read_name = "SRR1658224.34085432"
assert read_name in read_dict
reads = read_dict[read_name]
assert len(reads) == 2
pos1 = int(reads[0][3])
pos2 = int(reads[1][3])
assert pos1 == 16052611
assert pos2 == 16052734
# expect these read pairs to be filtered out (not present)
# only one version of read pair maps (expect 2)
assert "SRR1658224.34975561" not in read_dict
# 1/2 of second read pair missing
assert "SRR1658224.7462188" not in read_dict
# 1 pair maps to wrong location
assert "SRR1658224.31153145" not in read_dict
# neither pair maps
assert "SRR1658224.25014179" not in read_dict
# expect these (interleaved) read pairs to be kept
read_name = "readpair1"
assert read_name in read_dict
reads = read_dict[read_name]
assert len(reads) == 2
pos1 = int(reads[0][3])
pos2 = int(reads[1][3])
assert pos1 == 100
assert pos2 == 200
read_name = "readpair2"
assert read_name in read_dict
reads = read_dict[read_name]
assert len(reads) == 2
pos1 = int(reads[0][3])
pos2 = int(reads[1][3])
assert pos1 == 150
assert pos2 == 250
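# The test above can be run on its own with pytest (illustrative invocation;
# assumes samtools is on the PATH and filter_remapped_reads is importable):
#   pytest test_filter_remapped_reads.py -k test_filter_remapped_reads_pe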
| smozaffari/WASP | mapping/test_filter_remapped_reads.py | Python | apache-2.0 | 18,537 |
"""Contains function for calculating BERT embeddings"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import json
import re
import torch
from torch.utils.data import TensorDataset, DataLoader, SequentialSampler
from torch.utils.data.distributed import DistributedSampler
from pytorch_pretrained_bert.tokenization import BertTokenizer
from pytorch_pretrained_bert.modeling import BertModel
from scipy.spatial.distance import cosine, euclidean
class BertEmbedding(object):
"""Class for calculating embeddings between two texts"""
def __init__(self, bert_model='bert-base-uncased', max_seq_length=50, device='cpu'):
"""Initializing the BERT model"""
self.bert_model = bert_model
self.max_seq_length = max_seq_length
self.device = torch.device("cpu" if device=='cpu' or not torch.cuda.is_available() else "cuda")
n_gpu = torch.cuda.device_count()
self.tokenizer = BertTokenizer.from_pretrained(self.bert_model, do_lower_case=True)
self.model = BertModel.from_pretrained(self.bert_model)
self.model.to(self.device)
if n_gpu > 1:
self.model = torch.nn.DataParallel(self.model)
self.model.eval()
def get_embeddings(self, sentences, layer=-1):
"""Returns embeddings of words/sentences"""
assert isinstance(sentences, list)
for pair in sentences:
assert len(pair) == 1
examples = self._read_examples(sentences)
features = self._convert_examples_to_features(
examples=examples)
unique_id_to_feature = {}
for feature in features:
unique_id_to_feature[feature.unique_id] = feature
all_input_ids = torch.tensor([f.input_ids for f in features], dtype=torch.long)
all_input_mask = torch.tensor([f.input_mask for f in features], dtype=torch.long)
all_example_index = torch.arange(all_input_ids.size(0), dtype=torch.long)
eval_data = TensorDataset(all_input_ids, all_input_mask, all_example_index)
eval_sampler = SequentialSampler(eval_data)
eval_dataloader = DataLoader(eval_data, sampler=eval_sampler, batch_size=16)
out_features = []
for input_ids, input_mask, example_indices in eval_dataloader:
input_ids = input_ids.to(self.device)
input_mask = input_mask.to(self.device)
all_encoder_layers, _ = self.model(input_ids, token_type_ids=None, attention_mask=input_mask)
values = torch.mean(all_encoder_layers[layer], 1)
out_features.append(values.detach().cpu().numpy())
flat_list = [item for sublist in out_features for item in sublist]
return flat_list
def _convert_examples_to_features(self, examples):
"""Generate features of examples"""
features = []
for (ex_index, example) in enumerate(examples):
tokens_a = self.tokenizer.tokenize(example.text)
if len(tokens_a) > self.max_seq_length - 2:
tokens_a = tokens_a[0:(self.max_seq_length - 2)]
tokens = []
input_type_ids = []
tokens.append("[CLS]")
input_type_ids.append(0)
for token in tokens_a:
tokens.append(token)
input_type_ids.append(0)
tokens.append("[SEP]")
input_type_ids.append(0)
input_ids = self.tokenizer.convert_tokens_to_ids(tokens)
# The mask has 1 for real tokens and 0 for padding tokens. Only real
# tokens are attended to.
input_mask = [1] * len(input_ids)
# Zero-pad up to the sequence length.
while len(input_ids) < self.max_seq_length:
input_ids.append(0)
input_mask.append(0)
input_type_ids.append(0)
assert len(input_ids) == self.max_seq_length
assert len(input_mask) == self.max_seq_length
assert len(input_type_ids) == self.max_seq_length
features.append(
InputFeatures(
unique_id=example.unique_id,
tokens=tokens,
input_ids=input_ids,
input_mask=input_mask,
input_type_ids=input_type_ids))
return features
def _read_examples(self, inp):
"""Read a list of `InputExample`s from an input file."""
examples = []
unique_id = 0
for a, in inp:
line_a = a.strip()
examples.append(
InputExample(unique_id=unique_id, text=line_a))
unique_id += 1
return examples
class InputExample(object):
"""Input an example"""
def __init__(self, unique_id, text):
self.unique_id = unique_id
self.text = text
class InputFeatures(object):
"""A single set of features of data."""
def __init__(self, unique_id, tokens, input_ids, input_mask, input_type_ids):
self.unique_id = unique_id
self.tokens = tokens
self.input_ids = input_ids
self.input_mask = input_mask
self.input_type_ids = input_type_ids
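# Illustrative usage sketch, not part of the original module. Running it needs
# network access the first time so pytorch_pretrained_bert can download the
# pretrained weights; the sentences below are arbitrary examples.
if __name__ == "__main__":
    embedder = BertEmbedding(max_seq_length=32, device='cpu')
    vectors = embedder.get_embeddings([["a small red car"], ["an old blue truck"]])
    # Each element is a mean-pooled vector for one input (768 dims for bert-base).
    print(len(vectors), vectors[0].shape)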
| googleinterns/contextual-adjectives | categorize_adjectives/libraries/bert_embeddings.py | Python | apache-2.0 | 5,343 |
# Copyright 2020 Makani Technologies LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Report the various flap limits applied in the system."""
import collections
import math
import os
import makani
from makani.avionics.bootloader import system_config
from makani.avionics.firmware.params import codec
from makani.avionics.network import network_config
from makani.config import mconfig
from makani.control import system_types
_CONFIG = mconfig.MakeParams('common.all_params')
_SERVOS = ['A' + f for f in '124578'] + ['E1', 'E2', 'R1', 'R2']
_FLAPS = _SERVOS[0:6] + ['Elevator', 'Rudder']
def GetSimLimits():
servo_limits = [_CONFIG['sim']['servos_sim'][i]['servo_drive']
for i in range(len(_SERVOS))]
return {_SERVOS[i]: (servo_limits[i]['ref_model_min_position_limit'],
servo_limits[i]['ref_model_max_position_limit'])
for i in range(len(_SERVOS))}
def GetControlLimits():
return {_FLAPS[i]: (_CONFIG['control']['control_output']['flaps_min'][i],
_CONFIG['control']['control_output']['flaps_max'][i])
for i in range(len(_FLAPS))}
def _GetControlOutputLimits(output, lower_limit='lower_flap_limits',
upper_limit='upper_flap_limits'):
return {_FLAPS[i]: (output[lower_limit][i], output[upper_limit][i])
for i in range(len(_FLAPS))}
def GetControlCrosswindLimits():
return _GetControlOutputLimits(_CONFIG['control']['crosswind']['output'])
def GetControlCrosswindFlareLimits():
return _GetControlOutputLimits(_CONFIG['control']['crosswind']['output'],
lower_limit='lower_flap_limits_flare')
def GetControlTransInLimits():
return _GetControlOutputLimits(_CONFIG['control']['trans_in']['output'])
def GetControlHoverLimits():
return _GetControlOutputLimits(_CONFIG['control']['hover']['output'])
def GetControlManualLimits():
return _GetControlOutputLimits(_CONFIG['control']['manual']['output'])
def GetAvionicsServoLimits():
"""Get the avionics servo mechanical limits for the current system."""
sys_conf = system_config.SystemConfig.GetSystemConfigBySerial(
_CONFIG['system']['wing_serial'])
config_file = os.path.join(makani.HOME,
'avionics/servo/firmware/config_params.yaml')
net_conf = network_config.NetworkConfig()
yaml_keys = [sys_conf.config[net_conf.GetAioNode('servo_%s' % s.lower())]
for s in _SERVOS]
limits = [codec.DecodeYamlFile(config_file, key) for key in yaml_keys]
return {_SERVOS[i]: (limits[i].servo_min_limit, limits[i].servo_max_limit)
for i in range(len(_SERVOS))}
def _RudderServoToFlap(servo_angle):
servo_config = _CONFIG['system']['servos'][system_types.kServoR1]
return servo_config['nonlinear_servo_to_flap_ratio'] * math.sin(servo_angle)
def _RudderFlapToServo(flap_angle):
servo_config = _CONFIG['system']['servos'][system_types.kServoR1]
return math.asin(flap_angle / servo_config['nonlinear_servo_to_flap_ratio'])
def FlapsToServos(flaps):
"""Convert flap limits to servo limits."""
servos = {}
for flap, flap_range in flaps.iteritems():
if flap.startswith('A'):
servos[flap] = flap_range
elif flap == 'Elevator':
servos['E1'] = flap_range
servos['E2'] = flap_range
elif flap == 'Rudder':
servos['R1'] = (_RudderFlapToServo(flap_range[0]),
_RudderFlapToServo(flap_range[1]))
servos['R2'] = servos['R1']
else:
raise ValueError('Invalid flap %s' % flap)
return servos
def ServosToFlaps(servos):
"""Convert servo limits to flap limits."""
flaps = {}
for servo, servo_range in servos.iteritems():
if servo.startswith('A'):
flaps[servo] = servo_range
flaps['Elevator'] = (0.5 * (servos['E1'][0] + servos['E2'][0]),
0.5 * (servos['E1'][1] + servos['E2'][1]))
flaps['Rudder'] = (
_RudderServoToFlap(0.5 * (servos['R1'][0] + servos['R2'][0])),
_RudderServoToFlap(0.5 * (servos['R1'][1] + servos['R2'][1])))
return flaps
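# Illustrative round trip, not part of the original module (flap names and
# radian limits below are hypothetical): FlapsToServos copies the elevator and
# rudder limits onto their servo pairs, and ServosToFlaps averages them back, so
#   ServosToFlaps(FlapsToServos({'A1': (-0.1, 0.1),
#                                'Elevator': (-0.2, 0.2),
#                                'Rudder': (-0.3, 0.3)}))
# recovers the original limits up to floating-point error.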
def _PrintFlaps(name, limits, print_header):
if print_header:
print '%15s A1 A2 A4 A5 A7 A8 Ele Rud' % ''
print ('%15s min:' + ' %5.1f' * 8) % (
(name,) + tuple([math.degrees(limits[_FLAPS[i]][0])
for i in range(len(_FLAPS))]))
print ('%15s max:' + ' %5.1f' * 8) % (
('',) + tuple([math.degrees(limits[_FLAPS[i]][1])
for i in range(len(_FLAPS))]))
def _PrintServos(name, limits, print_header):
if print_header:
print ('%15s A1 A2 A4 A5 A7 A8'
' E1 E2 R1 R2' % '')
print ('%15s min:' + ' %5.1f' * 10) % (
(name,) + tuple([math.degrees(limits[_SERVOS[i]][0])
for i in range(len(_SERVOS))]))
print ('%15s max:' + ' %5.1f' * 10) % (
('',) + tuple([math.degrees(limits[_SERVOS[i]][1])
for i in range(len(_SERVOS))]))
Limit = collections.namedtuple('Limit', ['name', 'limits', 'is_flap_limit'])
_LIMITS = [
Limit('Controller', GetControlLimits(), True),
Limit('Cont. Crosswind', GetControlCrosswindLimits(), True),
Limit('Cont. Flare', GetControlCrosswindFlareLimits(), True),
Limit('Cont. Trans In', GetControlTransInLimits(), True),
Limit('Cont. Hover', GetControlHoverLimits(), True),
Limit('Cont. Manual', GetControlManualLimits(), True),
Limit('Servos', GetAvionicsServoLimits(), False),
Limit('Simulator', GetSimLimits(), False),
]
def GetServoLimits():
return [(l.name, FlapsToServos(l.limits) if l.is_flap_limit else l.limits)
for l in _LIMITS]
def GetFlapLimits():
return [(l.name, l.limits if l.is_flap_limit else ServosToFlaps(l.limits))
for l in _LIMITS]
def Main():
print '\nFlap limits:'
print_header = True
for name, flaps in GetFlapLimits():
_PrintFlaps(name, flaps, print_header)
print_header = False
print '\nServo limits:'
print_header = True
for name, servos in GetServoLimits():
_PrintServos(name, servos, print_header)
print_header = False
if __name__ == '__main__':
Main()
| google/makani | analysis/control/flap_limits.py | Python | apache-2.0 | 6,670 |
#!/usr/bin/env python3
# Copyright 2021 Anapaya Systems
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import logging
import pathlib
import subprocess
import time
from typing import List
import sys
from http import client
from plumbum import cli
from acceptance.common import base
from acceptance.common import docker
from acceptance.common import scion
from python.lib import scion_addr
import toml
logger = logging.getLogger(__name__)
class Test(base.TestBase):
"""
Test that in a topology with multiple ASes, every AS is capable of
requesting renewed certificates. The test verifies that each AS has loaded
the renewed certificate.
The test is split into multiple steps:
1. Start the topology.
2. For each AS in the topology, create a new private key and request
certificate chain renewal. The renewed chain is verified against the
TRC.
3. Remove the previous private key from the control servers.
4. Ensure that the new private key and certificate are loaded by observing
the http endpoint.
5. Check connectivity with an end to end test.
6. Stop all control servers and purge the state. This includes deleting
all databases with cached data, including the path and trust database.
7. Restart control servers and check connectivity again.
"""
end2end = cli.SwitchAttr(
"end2end_integration",
str,
default="./bin/end2end_integration",
help="The end2end_integration binary " +
"(default: ./bin/end2end_integration)",
)
def main(self):
if not self.nested_command:
try:
self.setup()
# Give some time for the topology to start.
time.sleep(10)
self._run()
finally:
self.teardown()
def _run(self):
isd_ases = scion.ASList.load("%s/gen/as_list.yml" %
self.test_state.artifacts).all
cs_configs = self._cs_configs()
logger.info("==> Start renewal process")
for isd_as in isd_ases:
logging.info("===> Start renewal: %s" % isd_as)
self._renewal_request(isd_as)
logger.info("==> Check key and certificate reloads")
self._check_key_cert(cs_configs)
logger.info("==> Check connectivity")
subprocess.run(
[self.end2end, "-d", "-outDir", self.test_state.artifacts],
check=True)
logger.info("==> Shutting down control servers and purging caches")
for container in self.list_containers("scion_sd.*"):
self.test_state.dc("rm", container)
for container in self.list_containers("scion_cs.*"):
self.stop_container(container)
for cs_config in cs_configs:
files = list((pathlib.Path(self.test_state.artifacts) /
"gen-cache").glob("%s*" % cs_config.stem))
for db_file in files:
db_file.unlink()
logger.info("Deleted files: %s" % [file.name for file in files])
logger.info("==> Restart containers")
self.setup_start()
time.sleep(5)
logger.info("==> Check connectivity")
subprocess.run(
[self.end2end, "-d", "-outDir", self.test_state.artifacts],
check=True)
logger.info("==> Backup mode")
for isd_as in isd_ases:
logging.info("===> Start renewal: %s" % isd_as)
self._renewal_request(isd_as, mode="--backup")
def _renewal_request(
self,
isd_as: scion_addr.ISD_AS,
mode: str = "--force",
):
as_dir = self._to_as_dir(isd_as)
docker_dir = pathlib.Path("/share") / self._rel(as_dir)
def read_file(filename: str) -> str:
with open(as_dir / "crypto/as" / filename) as f:
return f.read()
chain_name = "ISD%s-AS%s.pem" % (isd_as.isd_str(),
isd_as.as_file_fmt())
old_chain = read_file(chain_name)
old_key = read_file("cp-as.key")
chain = docker_dir / "crypto/as" / chain_name
args = [
chain,
docker_dir / "crypto/as/cp-as.key",
mode,
"--trc",
docker_dir / "certs/ISD1-B1-S1.trc",
"--sciond",
self.execute("tester_%s" % isd_as.file_fmt(), "sh", "-c",
"echo $SCION_DAEMON").strip(),
*self._local_flags(isd_as),
]
logger.info("Requesting certificate chain renewal: %s" %
chain.relative_to(docker_dir))
logger.info(
self.execute("tester_%s" % isd_as.file_fmt(), "./bin/scion-pki",
"certificate", "renew", *args))
logger.info("Verify renewed certificate chain")
verify_out = self.execute("tester_%s" % isd_as.file_fmt(),
"./bin/scion-pki", "certificate", "verify",
chain, "--trc",
"/share/gen/trcs/ISD1-B1-S1.trc")
logger.info(str(verify_out).rstrip("\n"))
renewed_chain = read_file(chain_name)
renewed_key = read_file("cp-as.key")
if renewed_chain == old_chain:
raise Exception(
"renewed chain does not differ from previous chain")
if renewed_key == old_key:
raise Exception("renewed key does not differ from previous key")
def _check_key_cert(self, cs_configs: List[pathlib.Path]):
not_ready = [*cs_configs]
for _ in range(5):
logger.info(
"Checking if all control servers have reloaded the key and certificate..."
)
for cs_config in not_ready:
conn = client.HTTPConnection(self._http_endpoint(cs_config))
conn.request("GET", "/signer")
resp = conn.getresponse()
if resp.status != 200:
logger.info("Unexpected response: %d %s", resp.status,
resp.reason)
continue
isd_as = scion_addr.ISD_AS(cs_config.stem[2:-2])
as_dir = self._to_as_dir(isd_as)
chain_name = "ISD%s-AS%s.pem" % (isd_as.isd_str(),
isd_as.as_file_fmt())
pld = json.loads(resp.read().decode("utf-8"))
if pld["subject_key_id"] != self._extract_skid(
as_dir / "crypto/as" / chain_name):
continue
logger.info(
"Control server successfully loaded new key and certificate: %s"
% self._rel(cs_config))
not_ready.remove(cs_config)
if not not_ready:
break
time.sleep(3)
else:
logger.error(
"Control servers without reloaded key and certificate: %s" %
[cs_config.name for cs_config in not_ready])
sys.exit(1)
def _http_endpoint(self, cs_config: pathlib.Path):
with open(cs_config, "r") as f:
cfg = toml.load(f)
return cfg["metrics"]["prometheus"]
def _extract_skid(self, file: pathlib.Path):
out = subprocess.check_output(
['openssl', 'x509', '-in', file, '-noout', '-text'])
lines = out.splitlines()
for i, v in enumerate(lines):
if v.decode("utf-8").find("Subject Key Identifier") > -1:
skid = lines[i + 1].decode("utf-8").split()[-1].replace(
":", " ").upper()
break
return skid
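    # Illustrative input/output for _extract_skid (hex value is hypothetical):
    # given certificate text containing
    #     X509v3 Subject Key Identifier:
    #         ab:cd:ef:01
    # the line following the match is split, its last token is taken, and
    # "AB CD EF 01" is returned.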
def _rel(self, path: pathlib.Path):
return path.relative_to(pathlib.Path(self.test_state.artifacts))
def _to_as_dir(self, isd_as: scion_addr.ISD_AS) -> pathlib.Path:
return pathlib.Path("%s/gen/AS%s" %
(self.test_state.artifacts, isd_as.as_file_fmt()))
def _cs_configs(self) -> List[pathlib.Path]:
return list(
pathlib.Path("%s/gen" %
self.test_state.artifacts).glob("AS*/cs*.toml"))
def _local_flags(self, isd_as: scion_addr.ISD_AS) -> List[str]:
return [
"--local",
self.execute("tester_%s" % isd_as.file_fmt(), "sh", "-c",
"echo $SCION_LOCAL_ADDR").strip(),
]
if __name__ == "__main__":
base.register_commands(Test)
base.TestBase.test_state = base.TestState(scion.SCIONDocker(),
docker.Compose())
Test.run()
| netsec-ethz/scion | acceptance/cert_renewal/test.py | Python | apache-2.0 | 9,210 |
# Generated by Django 2.2.4 on 2019-08-07 19:56
import awx.main.utils.polymorphic
import awx.main.fields
from django.db import migrations, models
import django.db.models.deletion
from awx.main.migrations._rbac import (
rebuild_role_parentage, rebuild_role_hierarchy,
migrate_ujt_organization, migrate_ujt_organization_backward,
restore_inventory_admins, restore_inventory_admins_backward
)
def rebuild_jt_parents(apps, schema_editor):
rebuild_role_parentage(apps, schema_editor, models=('jobtemplate',))
class Migration(migrations.Migration):
dependencies = [
('main', '0108_v370_unifiedjob_dependencies_processed'),
]
operations = [
        # when migrating backwards, rebuild the role parents and ancestors caching
migrations.RunPython(migrations.RunPython.noop, rebuild_jt_parents),
# add new organization field for JT and all other unified jobs
migrations.AddField(
model_name='unifiedjob',
name='tmp_organization',
field=models.ForeignKey(blank=True, help_text='The organization used to determine access to this unified job.', null=True, on_delete=awx.main.utils.polymorphic.SET_NULL, related_name='unifiedjobs', to='main.Organization'),
),
migrations.AddField(
model_name='unifiedjobtemplate',
name='tmp_organization',
field=models.ForeignKey(blank=True, help_text='The organization used to determine access to this template.', null=True, on_delete=awx.main.utils.polymorphic.SET_NULL, related_name='unifiedjobtemplates', to='main.Organization'),
),
# while new and old fields exist, copy the organization fields
migrations.RunPython(migrate_ujt_organization, migrate_ujt_organization_backward),
# with data saved, remove old fields
migrations.RemoveField(
model_name='project',
name='organization',
),
migrations.RemoveField(
model_name='workflowjobtemplate',
name='organization',
),
        # now that the old fields are removed, rename the new fields without conflicts with the old ones
migrations.RenameField(
model_name='unifiedjobtemplate',
old_name='tmp_organization',
new_name='organization',
),
migrations.RenameField(
model_name='unifiedjob',
old_name='tmp_organization',
new_name='organization',
),
# parentage of job template roles has genuinely changed at this point
migrations.AlterField(
model_name='jobtemplate',
name='admin_role',
field=awx.main.fields.ImplicitRoleField(editable=False, null='True', on_delete=django.db.models.deletion.CASCADE, parent_role=['organization.job_template_admin_role'], related_name='+', to='main.Role'),
),
migrations.AlterField(
model_name='jobtemplate',
name='execute_role',
field=awx.main.fields.ImplicitRoleField(editable=False, null='True', on_delete=django.db.models.deletion.CASCADE, parent_role=['admin_role', 'organization.execute_role'], related_name='+', to='main.Role'),
),
migrations.AlterField(
model_name='jobtemplate',
name='read_role',
field=awx.main.fields.ImplicitRoleField(editable=False, null='True', on_delete=django.db.models.deletion.CASCADE, parent_role=['organization.auditor_role', 'inventory.organization.auditor_role', 'execute_role', 'admin_role'], related_name='+', to='main.Role'),
),
# Re-compute the role parents and ancestors caching
migrations.RunPython(rebuild_jt_parents, migrations.RunPython.noop),
# for all permissions that will be removed, make them explicit
migrations.RunPython(restore_inventory_admins, restore_inventory_admins_backward),
]
| GoogleCloudPlatform/sap-deployment-automation | third_party/github.com/ansible/awx/awx/main/migrations/0109_v370_job_template_organization_field.py | Python | apache-2.0 | 3,857 |
#!/home/jojoriveraa/Dropbox/Capacitación/Platzi/Python-Django/NFCow/venv/bin/python3
from django.core import management
if __name__ == "__main__":
management.execute_from_command_line()
| jojoriveraa/titulacion-NFCOW | venv/bin/django-admin.py | Python | apache-2.0 | 192 |
# -*- coding: utf-8 -*-
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import warnings
from typing import Callable, Dict, Optional, Sequence, Tuple, Union
from google.api_core import grpc_helpers
from google.api_core import gapic_v1
import google.auth # type: ignore
from google.auth import credentials as ga_credentials # type: ignore
from google.auth.transport.grpc import SslCredentials # type: ignore
import grpc # type: ignore
from google.cloud.dialogflow_v2.types import session_entity_type
from google.cloud.dialogflow_v2.types import (
session_entity_type as gcd_session_entity_type,
)
from google.protobuf import empty_pb2 # type: ignore
from .base import SessionEntityTypesTransport, DEFAULT_CLIENT_INFO
class SessionEntityTypesGrpcTransport(SessionEntityTypesTransport):
"""gRPC backend transport for SessionEntityTypes.
Service for managing
[SessionEntityTypes][google.cloud.dialogflow.v2.SessionEntityType].
This class defines the same methods as the primary client, so the
primary client can load the underlying transport implementation
and call it.
It sends protocol buffers over the wire using gRPC (which is built on
top of HTTP/2); the ``grpcio`` package must be installed.
"""
_stubs: Dict[str, Callable]
def __init__(
self,
*,
host: str = "dialogflow.googleapis.com",
credentials: ga_credentials.Credentials = None,
credentials_file: str = None,
scopes: Sequence[str] = None,
channel: grpc.Channel = None,
api_mtls_endpoint: str = None,
client_cert_source: Callable[[], Tuple[bytes, bytes]] = None,
ssl_channel_credentials: grpc.ChannelCredentials = None,
client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None,
quota_project_id: Optional[str] = None,
client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
always_use_jwt_access: Optional[bool] = False,
) -> None:
"""Instantiate the transport.
Args:
host (Optional[str]):
The hostname to connect to.
credentials (Optional[google.auth.credentials.Credentials]): The
authorization credentials to attach to requests. These
credentials identify the application to the service; if none
are specified, the client will attempt to ascertain the
credentials from the environment.
This argument is ignored if ``channel`` is provided.
credentials_file (Optional[str]): A file with credentials that can
be loaded with :func:`google.auth.load_credentials_from_file`.
This argument is ignored if ``channel`` is provided.
scopes (Optional(Sequence[str])): A list of scopes. This argument is
ignored if ``channel`` is provided.
channel (Optional[grpc.Channel]): A ``Channel`` instance through
which to make calls.
api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint.
If provided, it overrides the ``host`` argument and tries to create
a mutual TLS channel with client SSL credentials from
``client_cert_source`` or application default SSL credentials.
client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]):
Deprecated. A callback to provide client SSL certificate bytes and
private key bytes, both in PEM format. It is ignored if
``api_mtls_endpoint`` is None.
ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials
for the grpc channel. It is ignored if ``channel`` is provided.
client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]):
A callback to provide client certificate bytes and private key bytes,
both in PEM format. It is used to configure a mutual TLS channel. It is
ignored if ``channel`` or ``ssl_channel_credentials`` is provided.
quota_project_id (Optional[str]): An optional project to use for billing
and quota.
client_info (google.api_core.gapic_v1.client_info.ClientInfo):
The client info used to send a user-agent string along with
API requests. If ``None``, then default info will be used.
Generally, you only need to set this if you're developing
your own client library.
always_use_jwt_access (Optional[bool]): Whether self signed JWT should
be used for service account credentials.
Raises:
google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport
creation failed for any reason.
google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials``
and ``credentials_file`` are passed.
"""
self._grpc_channel = None
self._ssl_channel_credentials = ssl_channel_credentials
self._stubs: Dict[str, Callable] = {}
if api_mtls_endpoint:
warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning)
if client_cert_source:
warnings.warn("client_cert_source is deprecated", DeprecationWarning)
if channel:
# Ignore credentials if a channel was passed.
credentials = False
# If a channel was explicitly provided, set it.
self._grpc_channel = channel
self._ssl_channel_credentials = None
else:
if api_mtls_endpoint:
host = api_mtls_endpoint
# Create SSL credentials with client_cert_source or application
# default SSL credentials.
if client_cert_source:
cert, key = client_cert_source()
self._ssl_channel_credentials = grpc.ssl_channel_credentials(
certificate_chain=cert, private_key=key
)
else:
self._ssl_channel_credentials = SslCredentials().ssl_credentials
else:
if client_cert_source_for_mtls and not ssl_channel_credentials:
cert, key = client_cert_source_for_mtls()
self._ssl_channel_credentials = grpc.ssl_channel_credentials(
certificate_chain=cert, private_key=key
)
# The base transport sets the host, credentials and scopes
super().__init__(
host=host,
credentials=credentials,
credentials_file=credentials_file,
scopes=scopes,
quota_project_id=quota_project_id,
client_info=client_info,
always_use_jwt_access=always_use_jwt_access,
)
if not self._grpc_channel:
self._grpc_channel = type(self).create_channel(
self._host,
# use the credentials which are saved
credentials=self._credentials,
# Set ``credentials_file`` to ``None`` here as
# the credentials that we saved earlier should be used.
credentials_file=None,
scopes=self._scopes,
ssl_credentials=self._ssl_channel_credentials,
quota_project_id=quota_project_id,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
# Wrap messages. This must be done after self._grpc_channel exists
self._prep_wrapped_messages(client_info)
@classmethod
def create_channel(
cls,
host: str = "dialogflow.googleapis.com",
credentials: ga_credentials.Credentials = None,
credentials_file: str = None,
scopes: Optional[Sequence[str]] = None,
quota_project_id: Optional[str] = None,
**kwargs,
) -> grpc.Channel:
"""Create and return a gRPC channel object.
Args:
host (Optional[str]): The host for the channel to use.
credentials (Optional[~.Credentials]): The
authorization credentials to attach to requests. These
credentials identify this application to the service. If
none are specified, the client will attempt to ascertain
the credentials from the environment.
credentials_file (Optional[str]): A file with credentials that can
be loaded with :func:`google.auth.load_credentials_from_file`.
This argument is mutually exclusive with credentials.
scopes (Optional[Sequence[str]]): A optional list of scopes needed for this
service. These are only used when credentials are not specified and
are passed to :func:`google.auth.default`.
quota_project_id (Optional[str]): An optional project to use for billing
and quota.
kwargs (Optional[dict]): Keyword arguments, which are passed to the
channel creation.
Returns:
grpc.Channel: A gRPC channel object.
Raises:
google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials``
and ``credentials_file`` are passed.
"""
return grpc_helpers.create_channel(
host,
credentials=credentials,
credentials_file=credentials_file,
quota_project_id=quota_project_id,
default_scopes=cls.AUTH_SCOPES,
scopes=scopes,
default_host=cls.DEFAULT_HOST,
**kwargs,
)
@property
def grpc_channel(self) -> grpc.Channel:
"""Return the channel designed to connect to this service.
"""
return self._grpc_channel
@property
def list_session_entity_types(
self,
) -> Callable[
[session_entity_type.ListSessionEntityTypesRequest],
session_entity_type.ListSessionEntityTypesResponse,
]:
r"""Return a callable for the list session entity types method over gRPC.
Returns the list of all session entity types in the
specified session.
This method doesn't work with Google Assistant
integration. Contact Dialogflow support if you need to
use session entities with Google Assistant integration.
Returns:
Callable[[~.ListSessionEntityTypesRequest],
~.ListSessionEntityTypesResponse]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "list_session_entity_types" not in self._stubs:
self._stubs["list_session_entity_types"] = self.grpc_channel.unary_unary(
"/google.cloud.dialogflow.v2.SessionEntityTypes/ListSessionEntityTypes",
request_serializer=session_entity_type.ListSessionEntityTypesRequest.serialize,
response_deserializer=session_entity_type.ListSessionEntityTypesResponse.deserialize,
)
return self._stubs["list_session_entity_types"]
@property
def get_session_entity_type(
self,
) -> Callable[
[session_entity_type.GetSessionEntityTypeRequest],
session_entity_type.SessionEntityType,
]:
r"""Return a callable for the get session entity type method over gRPC.
Retrieves the specified session entity type.
This method doesn't work with Google Assistant
integration. Contact Dialogflow support if you need to
use session entities with Google Assistant integration.
Returns:
Callable[[~.GetSessionEntityTypeRequest],
~.SessionEntityType]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "get_session_entity_type" not in self._stubs:
self._stubs["get_session_entity_type"] = self.grpc_channel.unary_unary(
"/google.cloud.dialogflow.v2.SessionEntityTypes/GetSessionEntityType",
request_serializer=session_entity_type.GetSessionEntityTypeRequest.serialize,
response_deserializer=session_entity_type.SessionEntityType.deserialize,
)
return self._stubs["get_session_entity_type"]
@property
def create_session_entity_type(
self,
) -> Callable[
[gcd_session_entity_type.CreateSessionEntityTypeRequest],
gcd_session_entity_type.SessionEntityType,
]:
r"""Return a callable for the create session entity type method over gRPC.
Creates a session entity type.
If the specified session entity type already exists,
overrides the session entity type.
This method doesn't work with Google Assistant
integration. Contact Dialogflow support if you need to
use session entities with Google Assistant integration.
Returns:
Callable[[~.CreateSessionEntityTypeRequest],
~.SessionEntityType]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "create_session_entity_type" not in self._stubs:
self._stubs["create_session_entity_type"] = self.grpc_channel.unary_unary(
"/google.cloud.dialogflow.v2.SessionEntityTypes/CreateSessionEntityType",
request_serializer=gcd_session_entity_type.CreateSessionEntityTypeRequest.serialize,
response_deserializer=gcd_session_entity_type.SessionEntityType.deserialize,
)
return self._stubs["create_session_entity_type"]
@property
def update_session_entity_type(
self,
) -> Callable[
[gcd_session_entity_type.UpdateSessionEntityTypeRequest],
gcd_session_entity_type.SessionEntityType,
]:
r"""Return a callable for the update session entity type method over gRPC.
Updates the specified session entity type.
This method doesn't work with Google Assistant
integration. Contact Dialogflow support if you need to
use session entities with Google Assistant integration.
Returns:
Callable[[~.UpdateSessionEntityTypeRequest],
~.SessionEntityType]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "update_session_entity_type" not in self._stubs:
self._stubs["update_session_entity_type"] = self.grpc_channel.unary_unary(
"/google.cloud.dialogflow.v2.SessionEntityTypes/UpdateSessionEntityType",
request_serializer=gcd_session_entity_type.UpdateSessionEntityTypeRequest.serialize,
response_deserializer=gcd_session_entity_type.SessionEntityType.deserialize,
)
return self._stubs["update_session_entity_type"]
@property
def delete_session_entity_type(
self,
) -> Callable[
[session_entity_type.DeleteSessionEntityTypeRequest], empty_pb2.Empty
]:
r"""Return a callable for the delete session entity type method over gRPC.
Deletes the specified session entity type.
This method doesn't work with Google Assistant
integration. Contact Dialogflow support if you need to
use session entities with Google Assistant integration.
Returns:
Callable[[~.DeleteSessionEntityTypeRequest],
~.Empty]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "delete_session_entity_type" not in self._stubs:
self._stubs["delete_session_entity_type"] = self.grpc_channel.unary_unary(
"/google.cloud.dialogflow.v2.SessionEntityTypes/DeleteSessionEntityType",
request_serializer=session_entity_type.DeleteSessionEntityTypeRequest.serialize,
response_deserializer=empty_pb2.Empty.FromString,
)
return self._stubs["delete_session_entity_type"]
def close(self):
self.grpc_channel.close()
__all__ = ("SessionEntityTypesGrpcTransport",)
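# Illustrative usage sketch (not part of the generated code). This transport is
# normally constructed for you by the SessionEntityTypes client, but it can be
# used directly; the project and session names are hypothetical and valid
# Google Cloud credentials are assumed to be available in the environment.
#
#   transport = SessionEntityTypesGrpcTransport()
#   request = session_entity_type.ListSessionEntityTypesRequest(
#       parent="projects/my-project/agent/sessions/my-session")
#   response = transport.list_session_entity_types(request)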
| googleapis/python-dialogflow | google/cloud/dialogflow_v2/services/session_entity_types/transports/grpc.py | Python | apache-2.0 | 18,035 |
# Copyright 2020 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
import itertools
import logging
from collections import defaultdict
from dataclasses import dataclass
from typing import Iterable, Optional, Tuple
from pants.backend.python.target_types import PythonRequirementsField, PythonSources
from pants.backend.python.typecheck.mypy.skip_field import SkipMyPyField
from pants.backend.python.typecheck.mypy.subsystem import MyPy
from pants.backend.python.util_rules import pex_from_targets
from pants.backend.python.util_rules.interpreter_constraints import InterpreterConstraints
from pants.backend.python.util_rules.pex import (
Pex,
PexRequest,
PexRequirements,
VenvPex,
VenvPexProcess,
)
from pants.backend.python.util_rules.pex_from_targets import PexFromTargetsRequest
from pants.backend.python.util_rules.python_sources import (
PythonSourceFiles,
PythonSourceFilesRequest,
)
from pants.core.goals.typecheck import TypecheckRequest, TypecheckResult, TypecheckResults
from pants.core.util_rules.config_files import ConfigFiles, ConfigFilesRequest
from pants.core.util_rules.source_files import SourceFiles, SourceFilesRequest
from pants.engine.addresses import Addresses, UnparsedAddressInputs
from pants.engine.fs import CreateDigest, Digest, DigestContents, FileContent, MergeDigests
from pants.engine.process import FallibleProcessResult
from pants.engine.rules import Get, MultiGet, collect_rules, rule
from pants.engine.target import FieldSet, Target, TransitiveTargets, TransitiveTargetsRequest
from pants.engine.unions import UnionRule
from pants.python.python_setup import PythonSetup
from pants.util.docutil import bracketed_docs_url
from pants.util.logging import LogLevel
from pants.util.ordered_set import FrozenOrderedSet, OrderedSet
from pants.util.strutil import pluralize
logger = logging.getLogger(__name__)
@dataclass(frozen=True)
class MyPyFieldSet(FieldSet):
required_fields = (PythonSources,)
sources: PythonSources
@classmethod
def opt_out(cls, tgt: Target) -> bool:
return tgt.get(SkipMyPyField).value
@dataclass(frozen=True)
class MyPyPartition:
root_targets: FrozenOrderedSet[Target]
closure: FrozenOrderedSet[Target]
interpreter_constraints: InterpreterConstraints
python_version_already_configured: bool
class MyPyRequest(TypecheckRequest):
field_set_type = MyPyFieldSet
def generate_argv(
mypy: MyPy,
typechecked_venv_pex: VenvPex,
*,
file_list_path: str,
python_version: Optional[str],
) -> Tuple[str, ...]:
args = [f"--python-executable={typechecked_venv_pex.python.argv0}", *mypy.args]
if mypy.config:
args.append(f"--config-file={mypy.config}")
if python_version:
args.append(f"--python-version={python_version}")
args.append(f"@{file_list_path}")
return tuple(args)
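# For example (illustrative values): with `[mypy].args = ["--strict"]`, no config
# file set, python_version "3.7", and file_list_path "__files.txt", this returns
# ("--python-executable=<venv python>", "--strict", "--python-version=3.7",
#  "@__files.txt").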
def check_and_warn_if_python_version_configured(
*, config: Optional[FileContent], args: Tuple[str, ...]
) -> bool:
configured = []
if config and b"python_version" in config.content:
configured.append(
f"`python_version` in {config.path} (which is used because of the "
"`[mypy].config` option)"
)
if "--py2" in args:
configured.append("`--py2` in the `--mypy-args` option")
if any(arg.startswith("--python-version") for arg in args):
configured.append("`--python-version` in the `--mypy-args` option")
if configured:
formatted_configured = " and you set ".join(configured)
logger.warning(
f"You set {formatted_configured}. Normally, Pants would automatically set this for you "
"based on your code's interpreter constraints "
f"({bracketed_docs_url('python-interpreter-compatibility')}). Instead, it will "
"use what you set.\n\n(Automatically setting the option allows Pants to partition your "
"targets by their constraints, so that, for example, you can run MyPy on Python 2-only "
"code and Python 3-only code at the same time. This feature may no longer work.)"
)
return bool(configured)
def determine_python_files(files: Iterable[str]) -> Tuple[str, ...]:
"""We run over all .py and .pyi files, but .pyi files take precedence.
MyPy will error if we say to run over the same module with both its .py and .pyi files, so we
must be careful to only use the .pyi stub.
"""
result: OrderedSet[str] = OrderedSet()
for f in files:
if f.endswith(".pyi"):
py_file = f[:-1] # That is, strip the `.pyi` suffix to be `.py`.
result.discard(py_file)
result.add(f)
elif f.endswith(".py"):
pyi_file = f + "i"
if pyi_file not in result:
result.add(f)
return tuple(result)
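# For example: determine_python_files(["foo.py", "foo.pyi", "bar.py"]) returns
# ("foo.pyi", "bar.py"), keeping only the stub for `foo`.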
@rule
async def mypy_typecheck_partition(partition: MyPyPartition, mypy: MyPy) -> TypecheckResult:
plugin_target_addresses = await Get(Addresses, UnparsedAddressInputs, mypy.source_plugins)
plugin_transitive_targets = await Get(
TransitiveTargets, TransitiveTargetsRequest(plugin_target_addresses)
)
plugin_requirements = PexRequirements.create_from_requirement_fields(
plugin_tgt[PythonRequirementsField]
for plugin_tgt in plugin_transitive_targets.closure
if plugin_tgt.has_field(PythonRequirementsField)
)
# If the user did not set `--python-version` already, we set it ourselves based on their code's
# interpreter constraints. This determines what AST is used by MyPy.
python_version = (
None
if partition.python_version_already_configured
else partition.interpreter_constraints.minimum_python_version()
)
# MyPy requires 3.5+ to run, but uses the typed-ast library to work with 2.7, 3.4, 3.5, 3.6,
# and 3.7. However, typed-ast does not understand 3.8+, so instead we must run MyPy with
# Python 3.8+ when relevant. We only do this if <3.8 can't be used, as we don't want a
# loose requirement like `>=3.6` to result in requiring Python 3.8+, which would error if
# 3.8+ is not installed on the machine.
tool_interpreter_constraints = (
partition.interpreter_constraints
if (
mypy.options.is_default("interpreter_constraints")
and partition.interpreter_constraints.requires_python38_or_newer()
)
else InterpreterConstraints(mypy.interpreter_constraints)
)
plugin_sources_get = Get(
PythonSourceFiles, PythonSourceFilesRequest(plugin_transitive_targets.closure)
)
closure_sources_get = Get(PythonSourceFiles, PythonSourceFilesRequest(partition.closure))
roots_sources_get = Get(
SourceFiles, SourceFilesRequest(tgt.get(PythonSources) for tgt in partition.root_targets)
)
requirements_pex_get = Get(
Pex,
PexFromTargetsRequest,
PexFromTargetsRequest.for_requirements(
(tgt.address for tgt in partition.root_targets),
hardcoded_interpreter_constraints=partition.interpreter_constraints,
internal_only=True,
),
)
# TODO(John Sirois): Scope the extra requirements to the partition.
# Right now we just use a global set of extra requirements and these might not be compatible
# with all partitions. See: https://github.com/pantsbuild/pants/issues/11556
mypy_extra_requirements_pex_get = Get(
Pex,
PexRequest(
output_filename="mypy_extra_requirements.pex",
internal_only=True,
requirements=PexRequirements(mypy.extra_requirements),
interpreter_constraints=partition.interpreter_constraints,
),
)
mypy_pex_get = Get(
VenvPex,
PexRequest(
output_filename="mypy.pex",
internal_only=True,
main=mypy.main,
requirements=PexRequirements((*mypy.all_requirements, *plugin_requirements)),
interpreter_constraints=tool_interpreter_constraints,
),
)
config_files_get = Get(ConfigFiles, ConfigFilesRequest, mypy.config_request)
(
plugin_sources,
closure_sources,
roots_sources,
mypy_pex,
requirements_pex,
mypy_extra_requirements_pex,
config_files,
) = await MultiGet(
plugin_sources_get,
closure_sources_get,
roots_sources_get,
mypy_pex_get,
requirements_pex_get,
mypy_extra_requirements_pex_get,
config_files_get,
)
python_files = determine_python_files(roots_sources.snapshot.files)
file_list_path = "__files.txt"
file_list_digest_request = Get(
Digest,
CreateDigest([FileContent(file_list_path, "\n".join(python_files).encode())]),
)
typechecked_venv_pex_request = Get(
VenvPex,
PexRequest(
output_filename="typechecked_venv.pex",
internal_only=True,
pex_path=[requirements_pex, mypy_extra_requirements_pex],
interpreter_constraints=partition.interpreter_constraints,
),
)
typechecked_venv_pex, file_list_digest = await MultiGet(
typechecked_venv_pex_request, file_list_digest_request
)
merged_input_files = await Get(
Digest,
MergeDigests(
[
file_list_digest,
plugin_sources.source_files.snapshot.digest,
closure_sources.source_files.snapshot.digest,
typechecked_venv_pex.digest,
config_files.snapshot.digest,
]
),
)
all_used_source_roots = sorted(
set(itertools.chain(plugin_sources.source_roots, closure_sources.source_roots))
)
env = {
"PEX_EXTRA_SYS_PATH": ":".join(all_used_source_roots),
"MYPYPATH": ":".join(all_used_source_roots),
}
result = await Get(
FallibleProcessResult,
VenvPexProcess(
mypy_pex,
argv=generate_argv(
mypy,
typechecked_venv_pex,
file_list_path=file_list_path,
python_version=python_version,
),
input_digest=merged_input_files,
extra_env=env,
description=f"Run MyPy on {pluralize(len(python_files), 'file')}.",
level=LogLevel.DEBUG,
),
)
return TypecheckResult.from_fallible_process_result(
result, partition_description=str(sorted(str(c) for c in partition.interpreter_constraints))
)
# TODO(#10864): Improve performance, e.g. by leveraging the MyPy cache.
@rule(desc="Typecheck using MyPy", level=LogLevel.DEBUG)
async def mypy_typecheck(
request: MyPyRequest, mypy: MyPy, python_setup: PythonSetup
) -> TypecheckResults:
if mypy.skip:
return TypecheckResults([], typechecker_name="MyPy")
# We batch targets by their interpreter constraints to ensure, for example, that all Python 2
# targets run together and all Python 3 targets run together. We can only do this by setting
# the `--python-version` option, but we allow the user to set it as a safety valve. We warn if
# they've set the option.
config_files = await Get(ConfigFiles, ConfigFilesRequest, mypy.config_request)
config_content = await Get(DigestContents, Digest, config_files.snapshot.digest)
python_version_configured = check_and_warn_if_python_version_configured(
config=next(iter(config_content), None), args=mypy.args
)
# When determining how to batch by interpreter constraints, we must consider the entire
# transitive closure to get the final resulting constraints.
# TODO(#10863): Improve the performance of this.
transitive_targets_per_field_set = await MultiGet(
Get(TransitiveTargets, TransitiveTargetsRequest([field_set.address]))
for field_set in request.field_sets
)
interpreter_constraints_to_transitive_targets = defaultdict(set)
for transitive_targets in transitive_targets_per_field_set:
interpreter_constraints = InterpreterConstraints.create_from_targets(
transitive_targets.closure, python_setup
) or InterpreterConstraints(mypy.interpreter_constraints)
interpreter_constraints_to_transitive_targets[interpreter_constraints].add(
transitive_targets
)
partitions = []
for interpreter_constraints, all_transitive_targets in sorted(
interpreter_constraints_to_transitive_targets.items()
):
combined_roots: OrderedSet[Target] = OrderedSet()
combined_closure: OrderedSet[Target] = OrderedSet()
for transitive_targets in all_transitive_targets:
combined_roots.update(transitive_targets.roots)
combined_closure.update(transitive_targets.closure)
partitions.append(
MyPyPartition(
FrozenOrderedSet(combined_roots),
FrozenOrderedSet(combined_closure),
interpreter_constraints,
python_version_already_configured=python_version_configured,
)
)
partitioned_results = await MultiGet(
Get(TypecheckResult, MyPyPartition, partition) for partition in partitions
)
return TypecheckResults(partitioned_results, typechecker_name="MyPy")
def rules():
return [
*collect_rules(),
UnionRule(TypecheckRequest, MyPyRequest),
*pex_from_targets.rules(),
]
| benjyw/pants | src/python/pants/backend/python/typecheck/mypy/rules.py | Python | apache-2.0 | 13,525 |
import os
import posixpath
import random
import string
import logging
import tempfile
import time
from .extract_images import ExtractImages
logger = logging.getLogger(__name__)
class ExtractImagesToS3(ExtractImages):
'''
This KnowledgePostProcessor subclass extracts images from posts to S3. It
is designed to be used upon addition to a knowledge repository, which can
reduce the size of repositories. It replaces local images with remote urls
based on `http_image_root`.
`s3_image_root` should be the root of the image folder on an S3 remote, such
as "s3://my_bucket/images".
`http_image_root` should be the root of the server where the images will be
accessible after uploading.
Note: This requires that user AWS credentials are set up appropriately and
that they have installed the aws cli packages.
'''
_registry_keys = ['extract_images_to_s3']
def __init__(self, s3_image_root, http_image_root):
self.s3_image_root = s3_image_root
self.http_image_root = http_image_root
def copy_image(self, kp, img_path, is_ref=False, repo_name='knowledge'):
# Copy image data to new file
if is_ref:
_, tmp_path = tempfile.mkstemp()
with open(tmp_path, 'wb') as f:
f.write(kp._read_ref(img_path))
else:
tmp_path = img_path
try:
# Get image type
img_ext = posixpath.splitext(img_path)[1]
# Make random filename for image
random_string = ''.join(random.choice(string.ascii_lowercase) for i in range(6))
fname_img = '{repo_name}_{time}_{random_string}{ext}'.format(
repo_name=repo_name,
time=int(round(time.time() * 100)),
random_string=random_string,
ext=img_ext).strip().replace(' ', '-')
# Copy image to accessible folder on S3
fname_s3 = posixpath.join(self.s3_image_root, repo_name, fname_img)
# Note: The following command may need to be prefixed with a login agent;
# for example, to handle multi-factor authentication.
cmd = "aws s3 cp '{0}' {1}".format(tmp_path, fname_s3)
logger.info("Uploading images to S3: {cmd}".format(cmd=cmd))
retval = os.system(cmd)
if retval != 0:
raise Exception('Problem uploading images to s3')
finally:
# Clean up temporary file
if is_ref:
os.remove(tmp_path)
# return uploaded path of file
return posixpath.join(self.http_image_root, repo_name, fname_img)
def skip_image(self, kp, image):
import re
if re.match('http[s]?://', image['src']):
return True
return False
def cleanup(self, kp):
if kp._has_ref('images'):
kp._drop_ref('images')
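# Illustrative example, not from the original module: with
# s3_image_root="s3://my_bucket/images" and
# http_image_root="https://static.example.com/images", an image found in a post
# of the "knowledge" repo is uploaded to
#   s3://my_bucket/images/knowledge/knowledge_<time>_<random>.png
# and the post's image source is rewritten to
#   https://static.example.com/images/knowledge/knowledge_<time>_<random>.png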
| airbnb/knowledge-repo | knowledge_repo/postprocessors/extract_images_to_s3.py | Python | apache-2.0 | 2,903 |
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright (c) 2010 OpenStack Foundation
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Chance (Random) Scheduler implementation
"""
import random
from oslo.config import cfg
from nova import exception
from nova.scheduler import driver
CONF = cfg.CONF
CONF.import_opt('compute_topic', 'nova.compute.rpcapi')
class ChanceScheduler(driver.Scheduler):
"""Implements Scheduler as a random node selector."""
def _filter_hosts(self, request_spec, hosts, filter_properties):
"""Filter a list of hosts based on request_spec."""
ignore_hosts = filter_properties.get('ignore_hosts', [])
hosts = [host for host in hosts if host not in ignore_hosts]
return hosts
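    # Minimal sketch of the filtering above (hypothetical host names, not part
    # of the original scheduler):
    #
    #   hosts = ['node1', 'node2', 'node3']
    #   filter_properties = {'ignore_hosts': ['node2']}
    #   _filter_hosts(request_spec, hosts, filter_properties) -> ['node1', 'node3']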
def _schedule(self, context, topic, request_spec, filter_properties):
"""Picks a host that is up at random."""
elevated = context.elevated()
hosts = self.hosts_up(elevated, topic)
if not hosts:
msg = _("Is the appropriate service running?")
raise exception.NoValidHost(reason=msg)
hosts = self._filter_hosts(request_spec, hosts, filter_properties)
if not hosts:
msg = _("Could not find another compute")
raise exception.NoValidHost(reason=msg)
return random.choice(hosts)
def select_hosts(self, context, request_spec, filter_properties):
"""Selects a set of random hosts."""
hosts = [self._schedule(context, CONF.compute_topic,
request_spec, filter_properties)
for instance_uuid in request_spec.get('instance_uuids', [])]
if not hosts:
raise exception.NoValidHost(reason="")
return hosts
def schedule_run_instance(self, context, request_spec,
admin_password, injected_files,
requested_networks, is_first_time,
filter_properties):
"""Create and run an instance or instances."""
instance_uuids = request_spec.get('instance_uuids')
for num, instance_uuid in enumerate(instance_uuids):
request_spec['instance_properties']['launch_index'] = num
try:
host = self._schedule(context, CONF.compute_topic,
request_spec, filter_properties)
updated_instance = driver.instance_update_db(context,
instance_uuid)
self.compute_rpcapi.run_instance(context,
instance=updated_instance, host=host,
requested_networks=requested_networks,
injected_files=injected_files,
admin_password=admin_password,
is_first_time=is_first_time,
request_spec=request_spec,
filter_properties=filter_properties)
except Exception as ex:
# NOTE(vish): we don't reraise the exception here to make sure
# that all instances in the request get set to
# error properly
driver.handle_schedule_error(context, ex, instance_uuid,
request_spec)
def schedule_prep_resize(self, context, image, request_spec,
filter_properties, instance, instance_type,
reservations):
"""Select a target for resize."""
host = self._schedule(context, CONF.compute_topic, request_spec,
filter_properties)
self.compute_rpcapi.prep_resize(context, image, instance,
instance_type, host, reservations)
| DirectXMan12/nova-hacking | nova/scheduler/chance.py | Python | apache-2.0 | 4,443 |
# Copyright 2013 Gert Kremer
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import base64
from django.http import HttpResponse
from django.contrib.auth import authenticate, login
#############################################################################
#
def view_or_basicauth(view, request, test_func, realm = "", *args, **kwargs):
"""
This is a helper function used by both 'logged_in_or_basicauth' and
    'has_perm_or_basicauth' that does the nitty-gritty of determining if they
are already logged in or if they have provided proper http-authorization
and returning the view if all goes well, otherwise responding with a 401.
"""
if test_func(request.user):
# Already logged in, just return the view.
#
return view(request, *args, **kwargs)
# They are not logged in. See if they provided login credentials
#
if 'HTTP_AUTHORIZATION' in request.META:
auth = request.META['HTTP_AUTHORIZATION'].split()
if len(auth) == 2:
            # NOTE: We only support basic authentication for now.
#
if auth[0].lower() == "basic":
uname, passwd = base64.b64decode(auth[1]).split(':')
user = authenticate(username=uname, password=passwd)
if user is not None:
if user.is_active:
login(request, user)
request.user = user
return view(request, *args, **kwargs)
# Either they did not provide an authorization header or
# something in the authorization attempt failed. Send a 401
# back to them to ask them to authenticate.
#
response = HttpResponse()
response.status_code = 401
response['WWW-Authenticate'] = 'Basic realm="%s"' % realm
return response
#############################################################################
#
def logged_in_or_basicauth(realm = ""):
"""
A simple decorator that requires a user to be logged in. If they are not
logged in the request is examined for a 'authorization' header.
If the header is present it is tested for basic authentication and
the user is logged in with the provided credentials.
If the header is not present a http 401 is sent back to the
requestor to provide credentials.
The purpose of this is that in several django projects I have needed
several specific views that need to support basic authentication, yet the
web site as a whole used django's provided authentication.
The uses for this are for urls that are access programmatically such as
by rss feed readers, yet the view requires a user to be logged in. Many rss
readers support supplying the authentication credentials via http basic
auth (and they do NOT support a redirect to a form where they post a
username/password.)
Use is simple:
    @logged_in_or_basicauth()
    def your_view(request):
...
You can provide the name of the realm to ask for authentication within.
"""
def view_decorator(func):
def wrapper(request, *args, **kwargs):
return view_or_basicauth(func, request,
lambda u: u.is_authenticated(),
realm, *args, **kwargs)
return wrapper
return view_decorator
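# Minimal usage sketch (assumed project code, not part of this module; shown
# here because the decorator factory must be called, optionally with a realm
# string, before being applied):
#
#   from tweety.basic_auth import logged_in_or_basicauth
#
#   @logged_in_or_basicauth("my feeds")
#   def feed_view(request):
#       ...  # served both to logged-in browsers and to basic-auth clients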
#############################################################################
#
def has_perm_or_basicauth(perm, realm = ""):
"""
This is similar to the above decorator 'logged_in_or_basicauth'
except that it requires the logged in user to have a specific
permission.
Use:
    @has_perm_or_basicauth('asforums.view_forumcollection')
    def your_view(request):
...
"""
def view_decorator(func):
def wrapper(request, *args, **kwargs):
return view_or_basicauth(func, request,
lambda u: u.has_perm(perm),
realm, *args, **kwargs)
return wrapper
return view_decorator
| schubergphilis/twitterwall | tweety/basic_auth.py | Python | apache-2.0 | 4,682 |
# =============================================================================
# Copyright (c) 2016, Cisco Systems, Inc
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
# THE POSSIBILITY OF SUCH DAMAGE.
# =============================================================================
import re
UNKNOWN = "unknown"
PLATFORM_ASR9K_P = 'asr9k_p'
PLATFORM_ASR9K_PX = "asr9k_px"
PLATFORM_CRS_P = "crs_p"
PLATFORM_CRS_PX = "crs_px"
PLATFORM_NCS6K = "ncs6k"
PLATFORM_NCS6K_SYSADMIN = "ncs6k_sysadmin"
PLATFORM_TYPE_UNKNOWN = -1
# IOS XR
PLATFORM_TYPE_ASR9K_PX_SMU = 0
PLATFORM_TYPE_ASR9K_PX_SP = 1
PLATFORM_TYPE_ASR9K_P_SMU = 2
PLATFORM_TYPE_ASR9K_P_PACKAGE = 3
PLATFORM_TYPE_ASR9K_PX_PACKAGE = 4
PLATFORM_TYPE_CRS_PX_SMU = 5
PLATFORM_TYPE_CRS_P_SMU = 6
PLATFORM_TYPE_CRS_PX_PACKAGE = 7
PLATFORM_TYPE_CRS_P_PACKAGE = 8
PLATFORM_TYPE_ASR9K_PX_TAR = 13
"""
Match NCS6K_SMU before NCS6K_PACKAGE so a SMU won't
be treated as a package as they have a very similar format.
In addition, the long string (ncs6k-sysadmin) is matched first.
"""
PLATFORM_TYPE_NCS6K_SYSADMIN_SMU = 9
PLATFORM_TYPE_NCS6K_SYSADMIN_PACKAGE = 10
PLATFORM_TYPE_NCS6K_SMU = 11
PLATFORM_TYPE_NCS6K_PACKAGE = 12
pattern_list = {}
# disk0:asr9k-mini-p-4.2.1
pattern = re.compile("\\S*asr9k-\\S*-p(-\\d+\\.\\d+\\.\\d+)\\S*")
pattern_list[PLATFORM_TYPE_ASR9K_P_PACKAGE] = pattern
# disk0:asr9k-p-4.2.3.CSCtz89449
pattern = re.compile("\\S*asr9k-p(-\\d+\\.\\d+\\.\\d+\\.)CSC\\S*")
pattern_list[PLATFORM_TYPE_ASR9K_P_SMU] = pattern
# disk0:asr9k-mini-px-4.2.1
pattern = re.compile("\\S*asr9k-\\S*-px(-\\d+\\.\\d+\\.\\d+)\\S*")
pattern_list[PLATFORM_TYPE_ASR9K_PX_PACKAGE] = pattern
# disk0:asr9k-px-4.2.3.CSCtz89449
pattern = re.compile("\\S*asr9k-px(-\\d+\\.\\d+\\.\\d+\\.)CSC\\S*")
pattern_list[PLATFORM_TYPE_ASR9K_PX_SMU] = pattern
# ASR9K-iosxr-px-k9-5.3.0.tar or ASR9K-iosxr-px-5.3.1-bridge_smus.tar
pattern = re.compile("\\S*ASR9K-iosxr-px\\S*(-\\d+\\.\\d+\\.\\d+)\\S*\\.tar")
pattern_list[PLATFORM_TYPE_ASR9K_PX_TAR] = pattern
# disk0:asr9k-px-4.3.2.sp-1.0.0 or asr9k-px-4.3.2.k9-sp-1.0.0
pattern = re.compile("\\S*asr9k-px(-\\d+\\.\\d+\\.\\d+\\.)\\S*sp\\S*")
pattern_list[PLATFORM_TYPE_ASR9K_PX_SP] = pattern
# disk0:hfr-mini-px-4.2.1
pattern = re.compile("\\S*hfr-\\S*-px(-\\d+\\.\\d+\\.\\d+)\\S*")
pattern_list[PLATFORM_TYPE_CRS_PX_PACKAGE] = pattern
# disk0:hfr-px-4.2.3.CSCtz89449
pattern = re.compile("\\S*hfr-px(-\\d+\\.\\d+\\.\\d+\\.)CSC\\S*")
pattern_list[PLATFORM_TYPE_CRS_PX_SMU] = pattern
# disk0:hfr-p-4.2.3.CSCtz89449
pattern = re.compile("\\S*hfr-p(-\\d+\\.\\d+\\.\\d+\\.)CSC\\S*")
pattern_list[PLATFORM_TYPE_CRS_P_SMU] = pattern
# disk0:hfr-mini-p-4.2.1
pattern = re.compile("\\S*hfr-\\S*-p(-\\d+\\.\\d+\\.\\d+)\\S*")
pattern_list[PLATFORM_TYPE_CRS_P_PACKAGE] = pattern
# ncs6k-5.0.1.CSCul51055-0.0.2.i
pattern = re.compile("\\S*ncs6k(-\\d+\\.\\d+\\.\\d+\\.)CSC\\S*")
pattern_list[PLATFORM_TYPE_NCS6K_SMU] = pattern
# ncs6k-mcast-5.0.1
pattern = re.compile("\\S*ncs6k-\\S*(-\\d+\\.\\d+\\.\\d+)\\S*")
pattern_list[PLATFORM_TYPE_NCS6K_PACKAGE] = pattern
# ncs6k-sysadmin-5.0.0.CSCul30161
pattern = re.compile("\\S*ncs6k-sysadmin(-\\d+\\.\\d+\\.\\d+\\.)CSC\\S*")
pattern_list[PLATFORM_TYPE_NCS6K_SYSADMIN_SMU] = pattern
# ncs6k-sysadmin-mcast-5.0.1
pattern = re.compile("\\S*ncs6k-sysadmin-\\S*(-\\d+\\.\\d+\\.\\d+)\\S*")
pattern_list[PLATFORM_TYPE_NCS6K_SYSADMIN_PACKAGE] = pattern
def get_IOSXR_release(name):
    matches = re.findall(r"\d+\.\d+\.\d+", name)
if matches:
return matches[0]
return UNKNOWN
def get_NCS6K_release(name):
"""
Example,
input: ncs6k-xr-5.0.1
ncs6k-5.0.1.CSCul51055-0.0.2.i
ncs6k-sysadmin-xr-5.0.1
ncs6k-sysadmin-5.0.1.CSCul51055-0.0.2.i
ASR9K-iosxr-px-k9-5.0.1.tar
ASR9K-iosxr-px-5.0.1-bridge_smus.tar
output: 5.0.1
"""
    matches = re.findall(r"\d+\.\d+\.\d+", name)
if matches:
return matches[0]
return UNKNOWN
def get_platform_type(name):
for platform_type in pattern_list:
pattern = pattern_list[platform_type]
if pattern.match(name):
return platform_type
return PLATFORM_TYPE_UNKNOWN
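# Illustrative expected results (a behaviour sketch only; the names follow the
# patterns defined above):
#
#   get_platform_type('asr9k-px-4.2.3.CSCtz89449')  -> PLATFORM_TYPE_ASR9K_PX_SMU
#   get_platform_type('ncs6k-mcast-5.0.1')          -> PLATFORM_TYPE_NCS6K_PACKAGE
#   get_platform_type('unrelated-file.bin')         -> PLATFORM_TYPE_UNKNOWN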
def get_platform(name):
"""
Returns the platform based on the pattern type.
ASR9K-PX, CRS-PX, NCS6K
"""
platform_type = get_platform_type(name)
if platform_type == PLATFORM_TYPE_ASR9K_P_SMU or \
platform_type == PLATFORM_TYPE_ASR9K_P_PACKAGE:
return PLATFORM_ASR9K_P
elif platform_type == PLATFORM_TYPE_ASR9K_PX_PACKAGE or \
platform_type == PLATFORM_TYPE_ASR9K_PX_SMU or \
platform_type == PLATFORM_TYPE_ASR9K_PX_SP or \
platform_type == PLATFORM_TYPE_ASR9K_PX_TAR:
return PLATFORM_ASR9K_PX
elif platform_type == PLATFORM_TYPE_CRS_PX_SMU or \
platform_type == PLATFORM_TYPE_CRS_PX_PACKAGE:
return PLATFORM_CRS_PX
elif platform_type == PLATFORM_TYPE_CRS_P_SMU or \
platform_type == PLATFORM_TYPE_CRS_P_PACKAGE:
return PLATFORM_CRS_P
elif platform_type == PLATFORM_TYPE_NCS6K_SMU or \
platform_type == PLATFORM_TYPE_NCS6K_PACKAGE:
return PLATFORM_NCS6K
elif platform_type == PLATFORM_TYPE_NCS6K_SYSADMIN_SMU or \
platform_type == PLATFORM_TYPE_NCS6K_SYSADMIN_PACKAGE:
return PLATFORM_NCS6K_SYSADMIN
else:
return UNKNOWN
def get_release(name):
platform_type = get_platform_type(name)
if platform_type == PLATFORM_TYPE_ASR9K_P_SMU or \
platform_type == PLATFORM_TYPE_ASR9K_P_PACKAGE or \
platform_type == PLATFORM_TYPE_CRS_P_SMU or \
platform_type == PLATFORM_TYPE_CRS_P_PACKAGE or \
platform_type == PLATFORM_TYPE_ASR9K_PX_PACKAGE or \
platform_type == PLATFORM_TYPE_ASR9K_PX_SMU or \
platform_type == PLATFORM_TYPE_ASR9K_PX_SP or \
platform_type == PLATFORM_TYPE_CRS_PX_SMU or \
platform_type == PLATFORM_TYPE_CRS_PX_PACKAGE or \
platform_type == PLATFORM_TYPE_ASR9K_PX_TAR:
return get_IOSXR_release(name)
elif platform_type == PLATFORM_TYPE_NCS6K_SMU or \
platform_type == PLATFORM_TYPE_NCS6K_PACKAGE or \
platform_type == PLATFORM_TYPE_NCS6K_SYSADMIN_SMU or \
platform_type == PLATFORM_TYPE_NCS6K_SYSADMIN_PACKAGE:
return get_NCS6K_release(name)
else:
        return UNKNOWN
if __name__ == '__main__':
names = []
names.append('ASR9K-iosxr-px-k9-5.3.1.tar')
names.append('ASR9K-iosxr-px-5.3.1-bridge_smus.tar')
names.append('asr9k-px-5.3.1.CSCuv00898.pie')
names.append('ASR9K-iosxr-px-k9-5.1.3.tar')
names.append('asr9k-px-5.1.3.CSCuw01943.pie')
names.append('ASR9K-iosxr-px-k9-5.3.0.tar')
names.append('ASR9K-iosxr-px-5.3.0-turboboot.tar')
names.append('ASR9K-iosxr-px-5.30.0.tar')
names.append('asr9k-px-5.2.2.sp1.pie')
for name in names:
print name
print(get_platform(name), get_release(name))
print | kstaniek/csm | csmserver/platform_matcher.py | Python | apache-2.0 | 8,457 |
# Copyright 2012 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Classes and methods to manage all aspects of student assessments."""
__author__ = '[email protected] (Philip Guo)'
import datetime
import logging
from models import courses
from models import models
from models import review
from models import student_work
from models import transforms
from models import utils
from models.models import Student
from models.models import ValidStudent
from models.models import Profile
from models.models import StudentAnswersEntity
from tools import verify
from utils import BaseHandler
from utils import HUMAN_READABLE_DATETIME_FORMAT
from google.appengine.ext import db
# questions per module - training 2 - 12 modules
# last is postcourse
# training
#MODULE_QUESTIONS = [4,10,7,5,5,5,5,7,5,5,5,11,7]
# recertification
MODULE_QUESTIONS = [2,4,5,4,3,7]
# mandatory modules 1 to 8 - needed?
#MANDATORY_MODULES = 8
# number of question modules
#MAX_MODULES = 6
MAX_MODULES = len(MODULE_QUESTIONS)-1
def calc_total_score(student):
#
mn = MODULE_QUESTIONS
# mm = MANDATORY_MODULES
#
overall_score = -1
ms = []
for i in range(1,MAX_MODULES+1):
course = 'a'+str(i)+'course'
ms.append(utils.get_score(student, course))
    # get profile for this user - mandatory modules
valid = ValidStudent.get_valid(student.key().name())
prof = Profile.get_by_key_name(valid.profile)
auth = eval(prof.auth)
# complete = mandatory modules are done (have scores)
complete = True
i = 0
for score in ms[:MAX_MODULES]:
if auth[i]:
            complete = complete and (score is not None)
i += 1
# compute overall score after mandatory modules are done
if complete:
part_score = 0
tq = 0
for i in range(MAX_MODULES):
            if ms[i] is not None:
part_score += mn[i] * ms[i]
tq += mn[i]
        # todo - add 0.5 before the int?
overall_score = int((part_score/tq)+0.5)
return overall_score
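# Worked example of the weighted average above (hypothetical module scores,
# assuming all mandatory modules are complete; weights come from
# MODULE_QUESTIONS, whose first MAX_MODULES entries are [2, 4, 5, 4, 3]):
#
#   ms         = [100, 100, 100, 100, 40]
#   part_score = 2*100 + 4*100 + 5*100 + 4*100 + 3*40 = 1620
#   tq         = 2 + 4 + 5 + 4 + 3 = 18
#   overall    = int((1620 / 18) + 0.5) = int(90.5) = 90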
def store_score(course, student, assessment_name, assessment_type, score):
"""Stores a student's score on a particular assessment.
Args:
course: the course containing the assessment.
student: the student whose data is stored.
        assessment_name: the name of the assessment.
        assessment_type: the type of the assessment.
score: the student's score on this assessment.
Returns:
the result of the assessment, if appropriate.
"""
# FIXME: Course creators can edit this code to implement custom
# assessment scoring and storage behavior
# TODO(pgbovine): Note that the latest version of answers are always saved,
# but scores are only saved if they're higher than the previous attempt.
# This can lead to unexpected analytics behavior. Resolve this.
existing_score = course.get_score(student, assessment_name)
# remember to cast to int for comparison
# logging.error('assessment name : %s exist score : %s score %s ',assessment_name,existing_score, score)
if assessment_name != 'postcourse':
if (existing_score is None) or (score > int(existing_score)):
utils.set_score(student, assessment_name, score)
# special handling for computing final score:
if assessment_name == 'postcourse':
# midcourse_score = utils.get_score(student, 'midcourse')
# if midcourse_score is None:
# midcourse_score = 0
# else:
# midcourse_score = int(midcourse_score)
if existing_score is None:
postcourse_score = score
else:
postcourse_score = int(existing_score)
if score > postcourse_score:
postcourse_score = score
# Calculate overall score based on a formula
overall_score = calc_total_score(student)
# logging.error('overall_score : %s ', overall_score)
# if utils.get_score(student, 'postcourse') == 0 and (overall_score > -1) :
# utils.set_score(student, 'postcourse', overall_score)
# utils.set_score(student, 'overall_score', overall_score)
# TODO(pgbovine): this changing of assessment_type is ugly ...
if overall_score == 100:
assessment_name = 'postcourse_100'
else:
if overall_score >= 90:
assessment_name = 'postcourse_pass'
else:
if overall_score > 0:
assessment_name = 'postcourse_fail'
else:
assessment_name = 'not_complete'
# utils.set_score(student, 'overall_score', overall_score)
# store the overall_score of the first run of training in post_course
# post_s= utils.get_score(student, 'postcourse')
# logging.error('postcourse : %s ', utils.get_score(student, 'postcourse'))
if utils.get_score(student, 'postcourse') == None and (overall_score > -1):
utils.set_score(student, 'postcourse', overall_score)
utils.set_score(student, 'overall_score', overall_score)
    over_s = utils.get_score(student, 'overall_score')
    if over_s is not None:
overall_score = calc_total_score(student)
utils.set_score(student, 'overall_score', overall_score)
return assessment_name
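# Summary of the assessment_name returned above for the 'postcourse'
# assessment, keyed by the computed overall score (-1 while mandatory modules
# are still incomplete):
#
#   overall_score == 100       -> 'postcourse_100'
#   90 <= overall_score < 100  -> 'postcourse_pass'
#   0 < overall_score < 90     -> 'postcourse_fail'
#   overall_score <= 0         -> 'not_complete'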
class AnswerHandler(BaseHandler):
"""Handler for saving assessment answers."""
# Find student entity and save answers
@db.transactional(xg=True)
def update_assessment_transaction(
            self, email, assessment_name, assessment_type, new_answers, score):
"""Stores answer and updates user scores.
Args:
email: the student's email address.
            assessment_name: the name of the assessment.
            assessment_type: the type of the assessment (as stated in unit.csv).
new_answers: the latest set of answers supplied by the student.
score: the numerical assessment score.
Returns:
the student instance.
"""
student = Student.get_enrolled_student_by_email(email)
course = self.get_course()
# It may be that old Student entities don't have user_id set; fix it.
if not student.user_id:
student.user_id = self.get_user().user_id()
answers = StudentAnswersEntity.get_by_key_name(student.user_id)
if not answers:
answers = StudentAnswersEntity(key_name=student.user_id)
answers.updated_on = datetime.datetime.now()
utils.set_answer(answers, assessment_name, new_answers)
        store_score(course, student, assessment_name, assessment_type, score)
student.put()
answers.put()
# Also record the event, which is useful for tracking multiple
# submissions and history.
models.EventEntity.record(
'submit-assessment', self.get_user(), transforms.dumps({
'type': 'assessment-%s' % assessment_name,
'values': new_answers, 'location': 'AnswerHandler'}))
return student
def post(self):
"""Handles POST requests."""
student = self.personalize_page_and_get_enrolled()
if not student:
return
if not self.assert_xsrf_token_or_fail(self.request, 'assessment-post'):
return
course = self.get_course()
assessment_type = self.request.get('assessment_type')
assessment_name = self.request.get('assessment_name')
if not assessment_type:
self.error(404)
logging.error('No assessment type supplied.')
return
unit = course.find_unit_by_id(assessment_type)
if unit is None or unit.type != verify.UNIT_TYPE_ASSESSMENT:
self.error(404)
logging.error('No assessment named %s exists.', assessment_type)
return
self.template_value['navbar'] = {'course': True}
self.template_value['assessment'] = assessment_name
# self.template_value['assessment'] = self.request.get('assessment_name')
self.template_value['assessment_name'] = unit.title
self.template_value['is_last_assessment'] = (
course.is_last_assessment(unit))
# Convert answers from JSON to dict.
answers = self.request.get('answers')
answers = transforms.loads(answers) if answers else []
grader = unit.workflow.get_grader()
# Scores are not recorded for human-reviewed assignments.
score = 0
if grader == courses.AUTO_GRADER:
score = int(round(float(self.request.get('score'))))
# Record assessment transaction.
student = self.update_assessment_transaction(
student.key().name(), assessment_name, assessment_type, answers, score)
if grader == courses.HUMAN_GRADER:
rp = course.get_reviews_processor()
# Guard against duplicate submissions of a human-graded assessment.
previously_submitted = rp.does_submission_exist(
unit.unit_id, student.get_key())
if not previously_submitted:
# Check that the submission due date has not passed.
time_now = datetime.datetime.now()
submission_due_date = unit.workflow.get_submission_due_date()
if time_now > submission_due_date:
self.template_value['time_now'] = time_now.strftime(
HUMAN_READABLE_DATETIME_FORMAT)
self.template_value['submission_due_date'] = (
submission_due_date.strftime(
HUMAN_READABLE_DATETIME_FORMAT))
self.template_value['error_code'] = (
'assignment_deadline_exceeded')
self.render('error.html')
return
submission_key = student_work.Submission.write(
unit.unit_id, student.get_key(), answers)
rp.start_review_process_for(
unit.unit_id, submission_key, student.get_key())
# Record completion event in progress tracker.
course.get_progress_tracker().put_assessment_completed(
student, assessment_type)
self.template_value['previously_submitted'] = previously_submitted
matcher = unit.workflow.get_matcher()
self.template_value['matcher'] = matcher
if matcher == review.PEER_MATCHER:
self.template_value['review_dashboard_url'] = (
'reviewdashboard?unit=%s' % unit.unit_id
)
self.render('reviewed_assessment_confirmation.html')
return
else:
# Record completion event in progress tracker.
course.get_progress_tracker().put_assessment_completed(
student, assessment_type)
# Save the submission in the datastore, overwriting the earlier
# version if it exists.
submission_key = student_work.Submission.write(
unit.unit_id, student.get_key(), answers)
self.template_value['result'] = course.get_overall_result(student)
self.template_value['score'] = score
self.template_value['overall_score'] = course.get_overall_score(
student)
self.render('test_confirmation.html')
| ksh/gpirecertification | controllers/assessments.py | Python | apache-2.0 | 11,743 |
# Copyright 2013 Dell Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from oslo.config import cfg
from neutron.agent.common import config as a_cfg
from neutron.tests import base
from neutron.tests.unit import test_api_v2
import neutron_fwaas.services.firewall.drivers.linux.iptables_fwaas as fwaas
_uuid = test_api_v2._uuid
FAKE_SRC_PREFIX = '10.0.0.0/24'
FAKE_DST_PREFIX = '20.0.0.0/24'
FAKE_PROTOCOL = 'tcp'
FAKE_SRC_PORT = 5000
FAKE_DST_PORT = 22
FAKE_FW_ID = 'fake-fw-uuid'
class IptablesFwaasTestCase(base.BaseTestCase):
def setUp(self):
super(IptablesFwaasTestCase, self).setUp()
cfg.CONF.register_opts(a_cfg.ROOT_HELPER_OPTS, 'AGENT')
self.utils_exec_p = mock.patch(
'neutron.agent.linux.utils.execute')
self.utils_exec = self.utils_exec_p.start()
self.iptables_cls_p = mock.patch(
'neutron.agent.linux.iptables_manager.IptablesManager')
self.iptables_cls_p.start()
self.firewall = fwaas.IptablesFwaasDriver()
def _fake_rules_v4(self, fwid, apply_list):
rule_list = []
rule1 = {'enabled': True,
'action': 'allow',
'ip_version': 4,
'protocol': 'tcp',
'destination_port': '80',
'source_ip_address': '10.24.4.2'}
rule2 = {'enabled': True,
'action': 'deny',
'ip_version': 4,
'protocol': 'tcp',
'destination_port': '22'}
ingress_chain = ('iv4%s' % fwid)[:11]
egress_chain = ('ov4%s' % fwid)[:11]
for router_info_inst in apply_list:
v4filter_inst = router_info_inst.iptables_manager.ipv4['filter']
v4filter_inst.chains.append(ingress_chain)
v4filter_inst.chains.append(egress_chain)
rule_list.append(rule1)
rule_list.append(rule2)
return rule_list
def _fake_firewall_no_rule(self):
rule_list = []
fw_inst = {'id': FAKE_FW_ID,
'admin_state_up': True,
'tenant_id': 'tenant-uuid',
'firewall_rule_list': rule_list}
return fw_inst
def _fake_firewall(self, rule_list):
fw_inst = {'id': FAKE_FW_ID,
'admin_state_up': True,
'tenant_id': 'tenant-uuid',
'firewall_rule_list': rule_list}
return fw_inst
def _fake_firewall_with_admin_down(self, rule_list):
fw_inst = {'id': FAKE_FW_ID,
'admin_state_up': False,
'tenant_id': 'tenant-uuid',
'firewall_rule_list': rule_list}
return fw_inst
def _fake_apply_list(self, router_count=1, distributed=False,
distributed_mode=None):
apply_list = []
while router_count > 0:
iptables_inst = mock.Mock()
router_inst = {'distributed': distributed}
v4filter_inst = mock.Mock()
v6filter_inst = mock.Mock()
v4filter_inst.chains = []
v6filter_inst.chains = []
iptables_inst.ipv4 = {'filter': v4filter_inst}
iptables_inst.ipv6 = {'filter': v6filter_inst}
router_info_inst = mock.Mock()
router_info_inst.iptables_manager = iptables_inst
router_info_inst.snat_iptables_manager = iptables_inst
if distributed_mode == 'dvr':
router_info_inst.dist_fip_count = 1
router_info_inst.router = router_inst
apply_list.append(router_info_inst)
router_count -= 1
return apply_list
def _setup_firewall_with_rules(self, func, router_count=1,
distributed=False, distributed_mode=None):
apply_list = self._fake_apply_list(router_count=router_count,
distributed=distributed, distributed_mode=distributed_mode)
rule_list = self._fake_rules_v4(FAKE_FW_ID, apply_list)
firewall = self._fake_firewall(rule_list)
if distributed:
if distributed_mode == 'dvr_snat':
if_prefix = 'sg-+'
if distributed_mode == 'dvr':
if_prefix = 'rfp-+'
else:
if_prefix = 'qr-+'
distributed_mode = 'legacy'
func(distributed_mode, apply_list, firewall)
invalid_rule = '-m state --state INVALID -j DROP'
est_rule = '-m state --state ESTABLISHED,RELATED -j ACCEPT'
rule1 = '-p tcp --dport 80 -s 10.24.4.2 -j ACCEPT'
rule2 = '-p tcp --dport 22 -j DROP'
ingress_chain = 'iv4%s' % firewall['id']
egress_chain = 'ov4%s' % firewall['id']
bname = fwaas.iptables_manager.binary_name
ipt_mgr_ichain = '%s-%s' % (bname, ingress_chain[:11])
ipt_mgr_echain = '%s-%s' % (bname, egress_chain[:11])
for router_info_inst in apply_list:
v4filter_inst = router_info_inst.iptables_manager.ipv4['filter']
calls = [mock.call.remove_chain('iv4fake-fw-uuid'),
mock.call.remove_chain('ov4fake-fw-uuid'),
mock.call.remove_chain('fwaas-default-policy'),
mock.call.add_chain('fwaas-default-policy'),
mock.call.add_rule('fwaas-default-policy', '-j DROP'),
mock.call.add_chain(ingress_chain),
mock.call.add_rule(ingress_chain, invalid_rule),
mock.call.add_rule(ingress_chain, est_rule),
mock.call.add_chain(egress_chain),
mock.call.add_rule(egress_chain, invalid_rule),
mock.call.add_rule(egress_chain, est_rule),
mock.call.add_rule(ingress_chain, rule1),
mock.call.add_rule(egress_chain, rule1),
mock.call.add_rule(ingress_chain, rule2),
mock.call.add_rule(egress_chain, rule2),
mock.call.add_rule('FORWARD',
'-o %s -j %s' % (if_prefix,
ipt_mgr_ichain)),
mock.call.add_rule('FORWARD',
'-i %s -j %s' % (if_prefix,
ipt_mgr_echain)),
mock.call.add_rule('FORWARD',
'-o %s -j %s-fwaas-defau' % (if_prefix,
bname)),
mock.call.add_rule('FORWARD',
'-i %s -j %s-fwaas-defau' % (if_prefix,
bname))]
v4filter_inst.assert_has_calls(calls)
def test_create_firewall_no_rules(self):
apply_list = self._fake_apply_list()
firewall = self._fake_firewall_no_rule()
self.firewall.create_firewall('legacy', apply_list, firewall)
invalid_rule = '-m state --state INVALID -j DROP'
est_rule = '-m state --state ESTABLISHED,RELATED -j ACCEPT'
bname = fwaas.iptables_manager.binary_name
for ip_version in (4, 6):
ingress_chain = ('iv%s%s' % (ip_version, firewall['id']))
egress_chain = ('ov%s%s' % (ip_version, firewall['id']))
calls = [mock.call.remove_chain(
'iv%sfake-fw-uuid' % ip_version),
mock.call.remove_chain(
'ov%sfake-fw-uuid' % ip_version),
mock.call.remove_chain('fwaas-default-policy'),
mock.call.add_chain('fwaas-default-policy'),
mock.call.add_rule('fwaas-default-policy', '-j DROP'),
mock.call.add_chain(ingress_chain),
mock.call.add_rule(ingress_chain, invalid_rule),
mock.call.add_rule(ingress_chain, est_rule),
mock.call.add_chain(egress_chain),
mock.call.add_rule(egress_chain, invalid_rule),
mock.call.add_rule(egress_chain, est_rule),
mock.call.add_rule('FORWARD',
'-o qr-+ -j %s-fwaas-defau' % bname),
mock.call.add_rule('FORWARD',
'-i qr-+ -j %s-fwaas-defau' % bname)]
if ip_version == 4:
v4filter_inst = apply_list[0].iptables_manager.ipv4['filter']
v4filter_inst.assert_has_calls(calls)
else:
v6filter_inst = apply_list[0].iptables_manager.ipv6['filter']
v6filter_inst.assert_has_calls(calls)
def test_create_firewall_with_rules(self):
self._setup_firewall_with_rules(self.firewall.create_firewall)
def test_create_firewall_with_rules_two_routers(self):
self._setup_firewall_with_rules(self.firewall.create_firewall,
router_count=2)
def test_update_firewall_with_rules(self):
self._setup_firewall_with_rules(self.firewall.update_firewall)
def test_delete_firewall(self):
apply_list = self._fake_apply_list()
firewall = self._fake_firewall_no_rule()
self.firewall.delete_firewall('legacy', apply_list, firewall)
ingress_chain = 'iv4%s' % firewall['id']
egress_chain = 'ov4%s' % firewall['id']
calls = [mock.call.remove_chain(ingress_chain),
mock.call.remove_chain(egress_chain),
mock.call.remove_chain('fwaas-default-policy')]
apply_list[0].iptables_manager.ipv4['filter'].assert_has_calls(calls)
def test_create_firewall_with_admin_down(self):
apply_list = self._fake_apply_list()
rule_list = self._fake_rules_v4(FAKE_FW_ID, apply_list)
firewall = self._fake_firewall_with_admin_down(rule_list)
self.firewall.create_firewall('legacy', apply_list, firewall)
calls = [mock.call.remove_chain('iv4fake-fw-uuid'),
mock.call.remove_chain('ov4fake-fw-uuid'),
mock.call.remove_chain('fwaas-default-policy'),
mock.call.add_chain('fwaas-default-policy'),
mock.call.add_rule('fwaas-default-policy', '-j DROP')]
apply_list[0].iptables_manager.ipv4['filter'].assert_has_calls(calls)
def test_create_firewall_with_rules_dvr_snat(self):
self._setup_firewall_with_rules(self.firewall.create_firewall,
distributed=True, distributed_mode='dvr_snat')
def test_update_firewall_with_rules_dvr_snat(self):
self._setup_firewall_with_rules(self.firewall.update_firewall,
distributed=True, distributed_mode='dvr_snat')
def test_create_firewall_with_rules_dvr(self):
self._setup_firewall_with_rules(self.firewall.create_firewall,
distributed=True, distributed_mode='dvr')
def test_update_firewall_with_rules_dvr(self):
self._setup_firewall_with_rules(self.firewall.update_firewall,
distributed=True, distributed_mode='dvr')
| citrix-openstack-build/neutron-fwaas | neutron_fwaas/tests.skip/unit/services/firewall/drivers/linux/test_iptables_fwaas.py | Python | apache-2.0 | 11,580 |
# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Dataloader for object detection."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import csv
from typing import Collection, Dict, List, Optional, Tuple, TypeVar, Union
import tensorflow as tf
from tensorflow_examples.lite.model_maker.core.api.api_util import mm_export
from tensorflow_examples.lite.model_maker.core.data_util import dataloader
from tensorflow_examples.lite.model_maker.core.data_util import object_detector_dataloader_util as util
import yaml
from tensorflow_examples.lite.model_maker.third_party.efficientdet import dataloader as det_dataloader
from tensorflow_examples.lite.model_maker.third_party.efficientdet.keras import label_util
DetectorDataLoader = TypeVar('DetectorDataLoader', bound='DataLoader')
# Csv lines with the label map.
CsvLines = Tuple[List[List[List[str]]], Dict[int, str]]
def _get_label_map(label_map):
"""Gets the label map dict."""
if isinstance(label_map, list):
label_map_dict = {}
for i, label in enumerate(label_map):
      # 0 is reserved for background.
label_map_dict[i + 1] = label
label_map = label_map_dict
label_map = label_util.get_label_map(label_map)
if 0 in label_map and label_map[0] != 'background':
    raise ValueError('0 must be reserved for background.')
label_map.pop(0, None)
name_set = set()
for idx, name in label_map.items():
if not isinstance(idx, int):
raise ValueError('The key (label id) in label_map must be integer.')
if not isinstance(name, str):
raise ValueError('The value (label name) in label_map must be string.')
if name in name_set:
raise ValueError('The value: %s (label name) can\'t be duplicated.' %
name)
name_set.add(name)
return label_map
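# Illustrative behaviour of _get_label_map (a sketch; id 0 is implicitly
# reserved for 'background' and stripped if present):
#
#   _get_label_map(['person', 'notperson'])        -> {1: 'person', 2: 'notperson'}
#   _get_label_map({1: 'person', 2: 'notperson'})  -> {1: 'person', 2: 'notperson'}
#   _get_label_map('voc')                          -> the standard PASCAL VOC label map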
def _group_csv_lines(csv_file: str,
set_prefixes: List[str],
delimiter: str = ',',
quotechar: str = '"') -> CsvLines:
"""Groups csv_lines for different set_names and label_map.
Args:
csv_file: filename of the csv file.
set_prefixes: Set prefix names for training, validation and test data. e.g.
['TRAIN', 'VAL', 'TEST'].
delimiter: Character used to separate fields.
quotechar: Character used to quote fields containing special characters.
Returns:
[training csv lines, validation csv lines, test csv lines], label_map
"""
# Dict that maps integer label ids to string label names.
label_map = {}
with tf.io.gfile.GFile(csv_file, 'r') as f:
reader = csv.reader(f, delimiter=delimiter, quotechar=quotechar)
# `lines_list` = [training csv lines, validation csv lines, test csv lines]
# Each csv line is a list of strings separated by delimiter. e.g.
# row 'one,two,three' in the csv file will be ['one', two', 'three'].
lines_list = [[], [], []]
for line in reader:
# Groups lines by the set_name.
set_name = line[0].strip()
for i, set_prefix in enumerate(set_prefixes):
if set_name.startswith(set_prefix):
lines_list[i].append(line)
label = line[2].strip()
# Updates label_map if it's a new label.
if label not in label_map.values():
label_map[len(label_map) + 1] = label
return lines_list, label_map
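# Sketch of the grouping above for a hypothetical AutoML-style CSV
# (set, image path, label, then two bounding-box vertices):
#
#   TRAIN,gs://bucket/img1.jpg,cat,0.1,0.1,,,0.3,0.3,,
#   TEST,gs://bucket/img2.jpg,dog,0.2,0.2,,,0.9,0.9,,
#
# would yield lines_list = [[<TRAIN row>], [], [<TEST row>]] and
# label_map = {1: 'cat', 2: 'dog'}.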
@mm_export('object_detector.DataLoader')
class DataLoader(dataloader.DataLoader):
"""DataLoader for object detector."""
def __init__(self,
tfrecord_file_patten,
size,
label_map,
annotations_json_file=None):
"""Initialize DataLoader for object detector.
Args:
tfrecord_file_patten: Glob for tfrecord files. e.g. "/tmp/coco*.tfrecord".
size: The size of the dataset.
label_map: Variable shows mapping label integers ids to string label
names. 0 is the reserved key for `background` and doesn't need to be
included in label_map. Label names can't be duplicated. Supported
formats are:
        1. Dict, map label integer ids to string label names, such as
           {1: 'person', 2: 'notperson'}.
        2. List, a list of label names such as ['person', 'notperson'], which
           is the same as setting label_map={1: 'person', 2: 'notperson'}.
        3. String, name for a certain dataset. Accepted values are: 'coco',
           'voc' and 'waymo'.
        4. String, yaml filename that stores label_map.
annotations_json_file: JSON with COCO data format containing golden
bounding boxes. Used for validation. If None, use the ground truth from
the dataloader. Refer to
https://towardsdatascience.com/coco-data-format-for-object-detection-a4c5eaf518c5
for the description of COCO data format.
"""
super(DataLoader, self).__init__(dataset=None, size=size)
self.tfrecord_file_patten = tfrecord_file_patten
self.label_map = _get_label_map(label_map)
self.annotations_json_file = annotations_json_file
@classmethod
def from_pascal_voc(
cls,
images_dir: str,
annotations_dir: str,
label_map: Union[List[str], Dict[int, str], str],
annotation_filenames: Optional[Collection[str]] = None,
ignore_difficult_instances: bool = False,
num_shards: int = 100,
max_num_images: Optional[int] = None,
cache_dir: Optional[str] = None,
cache_prefix_filename: Optional[str] = None) -> DetectorDataLoader:
"""Loads from dataset with PASCAL VOC format.
Refer to
https://towardsdatascience.com/coco-data-format-for-object-detection-a4c5eaf518c5
for the description of PASCAL VOC data format.
LabelImg Tool (https://github.com/tzutalin/labelImg) can annotate the image
and save annotations as XML files in PASCAL VOC data format.
Annotations are in the folder: `annotations_dir`.
    Raw images are in the folder: `images_dir`.
Args:
images_dir: Path to directory that store raw images.
annotations_dir: Path to the annotations directory.
label_map: Variable shows mapping label integers ids to string label
names. 0 is the reserved key for `background`. Label names can't be
        duplicated. Supported formats are:
        1. Dict, map label integer ids to string label names, e.g.
           {1: 'person', 2: 'notperson'}.
        2. List, a list of label names, e.g. ['person', 'notperson'], which is
           the same as setting label_map={1: 'person', 2: 'notperson'}.
        3. String, name for a certain dataset. Accepted values are: 'coco',
           'voc' and 'waymo'.
        4. String, yaml filename that stores label_map.
annotation_filenames: Collection of annotation filenames (strings) to be
loaded. For instance, if there're 3 annotation files [0.xml, 1.xml,
2.xml] in `annotations_dir`, setting annotation_filenames=['0', '1']
makes this method only load [0.xml, 1.xml].
ignore_difficult_instances: Whether to ignore difficult instances.
`difficult` can be set inside `object` item in the annotation xml file.
num_shards: Number of shards for output file.
      max_num_images: Max number of images to process.
cache_dir: The cache directory to save TFRecord, metadata and json file.
When cache_dir is not set, a temporary folder will be created and will
        not be removed automatically after training, so it can still be used
        later.
cache_prefix_filename: The cache prefix filename. If not set, will
automatically generate it based on `image_dir`, `annotations_dir` and
`annotation_filenames`.
Returns:
ObjectDetectorDataLoader object.
"""
label_map = _get_label_map(label_map)
# If `cache_prefix_filename` is None, automatically generates a hash value.
if cache_prefix_filename is None:
cache_prefix_filename = util.get_cache_prefix_filename_from_pascal(
images_dir=images_dir,
annotations_dir=annotations_dir,
annotation_filenames=annotation_filenames,
num_shards=num_shards)
cache_files = util.get_cache_files(
cache_dir=cache_dir,
cache_prefix_filename=cache_prefix_filename,
num_shards=num_shards)
# If not cached, writes data into tfrecord_file_paths and
# annotations_json_file_path.
# If `num_shards` differs, it's still not cached.
if not util.is_cached(cache_files):
cache_writer = util.PascalVocCacheFilesWriter(
label_map=label_map,
images_dir=images_dir,
num_shards=num_shards,
max_num_images=max_num_images,
ignore_difficult_instances=ignore_difficult_instances)
cache_writer.write_files(
cache_files=cache_files,
annotations_dir=annotations_dir,
annotation_filenames=annotation_filenames)
return cls.from_cache(cache_files.cache_prefix)
@classmethod
def from_csv(
cls,
filename: str,
images_dir: Optional[str] = None,
delimiter: str = ',',
quotechar: str = '"',
num_shards: int = 10,
max_num_images: Optional[int] = None,
cache_dir: Optional[str] = None,
cache_prefix_filename: Optional[str] = None
) -> List[Optional[DetectorDataLoader]]:
"""Loads the data from the csv file.
The csv format is shown in
    https://cloud.google.com/vision/automl/object-detection/docs/csv-format.
    We support bounding boxes with 2 vertices for now. Files in the local
    machine are supported as well.
Args:
filename: Name of the csv file.
images_dir: Path to directory that store raw images. If None, the image
path in the csv file is the path to Google Cloud Storage or the absolute
path in the local machine.
delimiter: Character used to separate fields.
quotechar: Character used to quote fields containing special characters.
num_shards: Number of shards for output file.
      max_num_images: Max number of images to process.
cache_dir: The cache directory to save TFRecord, metadata and json file.
When cache_dir is None, a temporary folder will be created and will not
        be removed automatically after training, so it can still be used
        later.
cache_prefix_filename: The cache prefix filename. If None, will
automatically generate it based on `filename`.
Returns:
train_data, validation_data, test_data which are ObjectDetectorDataLoader
objects. Can be None if without such data.
"""
# If `cache_prefix_filename` is None, automatically generates a hash value.
if cache_prefix_filename is None:
cache_prefix_filename = util.get_cache_prefix_filename_from_csv(
csv_file=filename, num_shards=num_shards)
# Gets a list of cache files mapping `set_prefixes`.
set_prefixes = ['TRAIN', 'VAL', 'TEST']
cache_files_list = util.get_cache_files_sequence(
cache_dir=cache_dir,
cache_prefix_filename=cache_prefix_filename,
set_prefixes=set_prefixes,
num_shards=num_shards)
# If not cached, writes data into tfrecord_file_paths and
# annotations_json_file_path.
# If `num_shards` differs, it's still not cached.
if not util.is_all_cached(cache_files_list):
lines_list, label_map = _group_csv_lines(
csv_file=filename,
set_prefixes=set_prefixes,
delimiter=delimiter,
quotechar=quotechar)
cache_writer = util.CsvCacheFilesWriter(
label_map=label_map,
images_dir=images_dir,
num_shards=num_shards,
max_num_images=max_num_images)
for cache_files, csv_lines in zip(cache_files_list, lines_list):
if csv_lines:
cache_writer.write_files(cache_files, csv_lines=csv_lines)
# Loads training & validation & test data from cache.
data = []
for cache_files in cache_files_list:
cache_prefix = cache_files.cache_prefix
try:
data.append(cls.from_cache(cache_prefix))
except ValueError:
# No training / validation / test data in the csv file.
# For instance, there're only training and test data in the csv file,
# this will make this function return `train_data, None, test_data`
data.append(None)
return data
@classmethod
def from_cache(cls, cache_prefix):
"""Loads the data from cache.
Args:
cache_prefix: The cache prefix including the cache directory and the cache
prefix filename, e.g: '/tmp/cache/train'.
Returns:
ObjectDetectorDataLoader object.
"""
# Gets TFRecord files.
tfrecord_file_patten = cache_prefix + '*.tfrecord'
if not tf.io.gfile.glob(tfrecord_file_patten):
raise ValueError('TFRecord files are empty.')
# Loads meta_data.
meta_data_file = cache_prefix + util.META_DATA_FILE_SUFFIX
if not tf.io.gfile.exists(meta_data_file):
raise ValueError('Metadata file %s doesn\'t exist.' % meta_data_file)
with tf.io.gfile.GFile(meta_data_file, 'r') as f:
meta_data = yaml.load(f, Loader=yaml.FullLoader)
# Gets annotation json file.
ann_json_file = cache_prefix + util.ANN_JSON_FILE_SUFFIX
if not tf.io.gfile.exists(ann_json_file):
ann_json_file = None
return DataLoader(tfrecord_file_patten, meta_data['size'],
meta_data['label_map'], ann_json_file)
def gen_dataset(self,
model_spec,
batch_size=None,
is_training=False,
use_fake_data=False):
"""Generate a batched tf.data.Dataset for training/evaluation.
Args:
model_spec: Specification for the model.
batch_size: A integer, the returned dataset will be batched by this size.
is_training: A boolean, when True, the returned dataset will be optionally
shuffled and repeated as an endless dataset.
use_fake_data: Use fake input.
Returns:
A TF dataset ready to be consumed by Keras model.
"""
reader = det_dataloader.InputReader(
self.tfrecord_file_patten,
is_training=is_training,
use_fake_data=use_fake_data,
max_instances_per_image=model_spec.config.max_instances_per_image,
debug=model_spec.config.debug)
self._dataset = reader(model_spec.config.as_dict(), batch_size=batch_size)
return self._dataset
def split(self, fraction):
"""This function isn't implemented for the object detection task."""
raise NotImplementedError(
'split function is not supported in the object detection task.')
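# Minimal end-to-end sketch (import path and data locations are assumptions,
# not part of this module):
#
#   from tflite_model_maker import model_spec, object_detector
#
#   train_data = object_detector.DataLoader.from_pascal_voc(
#       images_dir='images/train',
#       annotations_dir='annotations/train',
#       label_map=['person', 'notperson'])
#   spec = model_spec.get('efficientdet_lite0')
#   dataset = train_data.gen_dataset(spec, batch_size=8, is_training=True)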
| tensorflow/examples | tensorflow_examples/lite/model_maker/core/data_util/object_detector_dataloader.py | Python | apache-2.0 | 15,144 |
from . import families
from .glm import glm, linear_component, plot_posterior_predictive
| wanderer2/pymc3 | pymc3/glm/__init__.py | Python | apache-2.0 | 89 |
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Generated code. DO NOT EDIT!
#
# Snippet for ListSuggestions
# NOTE: This snippet has been automatically generated for illustrative purposes only.
# It may require modifications to work in your environment.
# To install the latest published package dependency, execute the following:
# python3 -m pip install google-cloud-dialogflow
# [START dialogflow_generated_dialogflow_v2beta1_Participants_ListSuggestions_async]
from google.cloud import dialogflow_v2beta1
async def sample_list_suggestions():
# Create a client
client = dialogflow_v2beta1.ParticipantsAsyncClient()
# Initialize request argument(s)
request = dialogflow_v2beta1.ListSuggestionsRequest(
)
# Make the request
    page_result = await client.list_suggestions(request=request)
# Handle the response
async for response in page_result:
print(response)
# [END dialogflow_generated_dialogflow_v2beta1_Participants_ListSuggestions_async]
| googleapis/python-dialogflow | samples/generated_samples/dialogflow_generated_dialogflow_v2beta1_participants_list_suggestions_async.py | Python | apache-2.0 | 1,546 |
from __future__ import unicode_literals
import logging
import os
from mopidy import config, ext
__version__ = '0.3.0'
logger = logging.getLogger(__name__)
class Extension(ext.Extension):
dist_name = 'Mopidy-Webhooks'
ext_name = 'webhooks'
version = __version__
def get_default_config(self):
conf_file = os.path.join(os.path.dirname(__file__), 'ext.conf')
return config.read(conf_file)
def get_config_schema(self):
schema = super(Extension, self).get_config_schema()
schema['api_key'] = config.String()
schema['api_key_header_name'] = config.String()
schema['status_update_interval'] = config.Integer()
schema['webhook_url'] = config.String()
return schema
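    # Example of the matching section in mopidy.conf (values are placeholders;
    # ext.conf holds the shipped defaults):
    #
    #   [webhooks]
    #   enabled = true
    #   api_key = secret-token
    #   api_key_header_name = X-API-Key
    #   status_update_interval = 10000
    #   webhook_url = https://example.com/api/mopidy/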
def setup(self, registry):
from .frontend import WebhookFrontend
registry.add('frontend', WebhookFrontend)
| pombredanne/mopidy-webhooks | mopidy_webhooks/__init__.py | Python | apache-2.0 | 877 |
# Copyright 2014 Rackspace
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
'''update load balancer amphora relationship
Revision ID: 4c094013699a
Revises: 35dee79d5865
Create Date: 2014-09-15 14:42:44.875448
'''
# revision identifiers, used by Alembic.
revision = '4c094013699a'
down_revision = '35dee79d5865'
from alembic import op
import sqlalchemy as sa
def upgrade():
op.add_column(
u'amphora',
sa.Column(u'load_balancer_id', sa.String(36),
sa.ForeignKey(u'load_balancer.id',
name=u'fk_amphora_load_balancer_id'),
nullable=True)
)
op.drop_table(u'load_balancer_amphora')
op.drop_constraint(
u'fk_container_provisioning_status_name', u'amphora',
type_=u'foreignkey'
)
op.create_foreign_key(
u'fk_amphora_provisioning_status_name', u'amphora',
u'provisioning_status', [u'status'], [u'name']
)
def downgrade():
op.drop_constraint(
u'fk_amphora_load_balancer_id', u'amphora', type_=u'foreignkey'
)
op.drop_column(
u'amphora', u'load_balancer_id'
)
op.create_table(
u'load_balancer_amphora',
sa.Column(u'amphora_id', sa.String(36), nullable=False),
sa.Column(u'load_balancer_id', sa.String(36), nullable=False),
sa.ForeignKeyConstraint(
[u'load_balancer_id'], [u'load_balancer.id'],
name=u'fk_load_balancer_amphora_load_balancer_id'),
sa.ForeignKeyConstraint([u'amphora_id'],
[u'amphora.id'],
name=u'fk_load_balancer_amphora_id'),
sa.PrimaryKeyConstraint(u'amphora_id', u'load_balancer_id')
)
op.drop_constraint(
u'fk_amphora_provisioning_status_name', u'amphora',
type_=u'foreignkey'
)
op.create_foreign_key(
u'fk_container_provisioning_status_name', u'amphora',
u'provisioning_status', [u'status'], [u'name']
)
| brandonlogan/octavia | octavia/db/migration/alembic_migrations/versions/4c094013699a_update_load_balancer_amphora.py | Python | apache-2.0 | 2,518 |
"""Unit test for directory watcher (inotify).
"""
import errno
import os
import shutil
import tempfile
import select
import unittest
# Disable W0611: Unused import
import tests.treadmill_test_deps # pylint: disable=W0611
import mock
from treadmill import idirwatch
class DirWatcherTest(unittest.TestCase):
"""Tests for teadmill.idirwatch."""
def setUp(self):
self.root = tempfile.mkdtemp()
def tearDown(self):
if self.root and os.path.isdir(self.root):
shutil.rmtree(self.root)
def test_watcher(self):
"""Tests created/deleted callbackes."""
created = []
modified = []
deleted = []
test_file = os.path.join(self.root, 'a')
watcher = idirwatch.DirWatcher(self.root)
watcher.on_created = lambda x: created.append(x) or 'one'
watcher.on_modified = lambda x: modified.append(x) or 'two'
watcher.on_deleted = lambda x: deleted.append(x) or 'three'
with open(test_file, 'w') as f:
f.write('hello')
with open(test_file, 'a') as f:
f.write(' world!')
os.unlink(test_file)
with open(test_file, 'w') as f:
f.write('hello again')
res = watcher.process_events(max_events=3)
self.assertEqual([test_file], created)
self.assertEqual([test_file], modified)
self.assertEqual([test_file], deleted)
self.assertEqual(
[
(idirwatch.DirWatcherEvent.CREATED, test_file, 'one'),
(idirwatch.DirWatcherEvent.MODIFIED, test_file, 'two'),
(idirwatch.DirWatcherEvent.DELETED, test_file, 'three'),
(idirwatch.DirWatcherEvent.MORE_PENDING, None, None),
],
res,
)
@mock.patch('select.poll', mock.Mock())
def test_signal(self):
"""Tests behavior when signalled during wait."""
watcher = idirwatch.DirWatcher(self.root)
mocked_pollobj = select.poll.return_value
mocked_pollobj.poll.side_effect = select.error(errno.EINTR, '')
self.assertFalse(watcher.wait_for_events())
if __name__ == '__main__':
unittest.main()
| toenuff/treadmill | tests/idirwatcher_test.py | Python | apache-2.0 | 2,182 |
from webfs import WebFSStat
import stat
def Test_Basic():
fields = ('st_mode', 'st_ino', 'st_dev', 'st_nlink', 'st_uid', 'st_gid',
'st_size', 'st_atime', 'st_mtime', 'st_ctime')
st = WebFSStat()
print st.__dict__.keys()
for field in fields:
assert field in st.__dict__.keys(), 'field(%s) is not in members' % field
def Test_InitParam():
st = WebFSStat()
assert st.st_mode == stat.S_IFDIR | 0555
st = WebFSStat(False)
assert st.st_mode == stat.S_IFREG | 0444
def Test_IsDir():
st = WebFSStat()
assert st.isDir()
st = WebFSStat(False)
assert not st.isDir()
| harun-emektar/webfs | tests/Test_WebFSStat.py | Python | apache-2.0 | 655 |
import os
import shutil
import logging
from getpass import getuser
from dls_ade import vcs_git, Server
from dls_ade.exceptions import (RemoteRepoError, VerificationError,
ArgumentError)
class ModuleCreator(object):
"""Abstract base class for the management of the creation of new modules.
Attributes:
_area: The 'area' of the module to be created.
_cwd: The current working directory upon initialisation.
_module_name: The base name of the module path.
_module_path: The relative module path.
Used in messages and exceptions for user-friendliness.
abs_module_path: The absolute module path.
Used for system and git commands.
_server_repo_path: The git repository server path for module.
_module_template: Object that handles file and user message creation.
        _remote_repo_valid(bool): True once the server path has been checked
            and found free of conflicts. This flag is separated as the user
            needs to check this towards the beginning to avoid unnecessary
            file creation.
_can_create_local_module(bool): True if can run create_local_module.
_can_push_repo_to_remote(bool): True if can run push_repo_to_remote.
Raises:
:class:`~dls_ade.exceptions.ModuleCreatorError`: Base class for this \
module's exceptions
"""
def __init__(self, module_path, area, module_template_cls,
**kwargs):
"""Default initialisation of all object attributes.
Args:
module_path: The relative module path.
Used in messages and exceptions for user-friendliness.
area: The development area of the module to be created.
In particular, this specifies the exact template files to be
created as well as affecting the repository server path.
module_template_cls: Class for module_template object.
Must be a non-abstract subclass of ModuleTemplate.
kwargs: Additional arguments for module creation.
"""
self._usermsg = logging.getLogger("usermessages")
self._area = area
self._cwd = os.getcwd()
self._module_path = module_path
self._module_name = os.path.basename(os.path.normpath(
self._module_path))
self.server = Server()
self.abs_module_path = os.path.join(self._cwd, self._module_path)
self._server_repo_path = self.server.dev_module_path(self._module_path,
self._area)
template_args = {'module_name': self._module_name,
'module_path': self._module_path,
'user_login': getuser()}
if kwargs:
template_args.update(kwargs)
self._module_template = module_template_cls(template_args)
self._remote_repo_valid = False
# These boolean values allow us to call the methods in any order
self._can_create_local_module = False
self._can_push_repo_to_remote = False
def verify_remote_repo(self):
"""Verifies there are no name conflicts with the remote repository.
This checks whether or not there are any name conflicts between the
intended module name and the modules that already exist on the remote
repository.
Sets the `_remote_repo_valid` boolean value to True if there are no
conflicts.
Raises:
:class:`~dls_ade.exceptions.VerificationError`: If there is a \
name conflict with the server.
"""
if self._remote_repo_valid:
return
if self.server.is_server_repo(self._server_repo_path):
err_message = ("The path {dir:s} already exists on server,"
" cannot continue")
raise VerificationError(
err_message.format(dir=self._server_repo_path)
)
self._remote_repo_valid = True
def verify_can_create_local_module(self):
"""Verifies that conditions are suitable for creating a local module.
When :meth:`create_local_module` is called, if the boolean value
`_can_create_local_module` is False, this method is run to make sure
that :meth:`create_local_module` can operate completely.
This method also sets the `_can_create_local_module` attribute to True
so it can be run separately before :meth:`create_local_module`.
This method will fail (raise a VerificationError) if:
- The intended local directory for creation already exists
- The user is currently inside a git repository
Raises:
:class:`~dls_ade.exceptions.VerificationError`: Local module \
cannot be created.
"""
if self._can_create_local_module:
return
err_list = []
if os.path.exists(self.abs_module_path):
err_list.append("Directory {dir:s} already exists, "
"please move elsewhere and try again.")
if vcs_git.is_in_local_repo(self._cwd):
err_list.append("Currently in a git repository, "
"please move elsewhere and try again.")
if err_list:
err_message = "\n".join(err_list).format(dir=self._module_path)
self._can_create_local_module = False
raise VerificationError(err_message)
self._can_create_local_module = True
def verify_can_push_repo_to_remote(self):
"""Verifies that one can push the local module to the remote server.
When :meth:`push_repo_to_remote` is called, if the boolean value
`_can_push_repo_to_remote` is False, this method is run to make sure
that :meth:`push_repo_to_remote` can operate completely.
This method also sets the `_can_push_repo_to_remote` attribute to True
so it can be run separately before :meth:`push_repo_to_remote`.
This method will fail (raise a VerificationError) if:
- The local module does not exist
- The local module is not a git repository
- There is a naming conflict with the remote server
Raises:
:class:`~dls_ade.exceptions.VerificationError`: Local repository \
cannot be pushed to remote.
"""
if self._can_push_repo_to_remote:
return
self._can_push_repo_to_remote = True
err_list = []
if not os.path.exists(self.abs_module_path):
err_list.append("Directory {dir:s} does not exist.")
else:
mod_dir_is_repo = vcs_git.is_local_repo_root(self.abs_module_path)
if not mod_dir_is_repo:
err_list.append("Directory {dir:s} is not a git repository. "
"Unable to push to remote repository.")
err_list = [err.format(dir=self._module_path) for err in err_list]
# This allows us to retain the remote_repo_valid error message
if not self._remote_repo_valid:
try:
self.verify_remote_repo()
except VerificationError as e:
err_list.append(str(e))
if err_list:
self._can_push_repo_to_remote = False
raise VerificationError("\n".join(err_list))
def create_local_module(self):
"""Creates the folder structure and files in a new git repository.
This will use the file creation specified in
:meth:`~dls_ade.module_template.ModuleTemplate.create_files`. It will
also stage and commit these files to a git repository located in the
same directory
Note:
This will set `_can_create_local_module` False in order to prevent
the user calling this method twice in succession.
Raises:
:class:`~dls_ade.exceptions.VerificationError`: Local module \
cannot be created.
OSError: The abs_module_path already exists (outside interference).
"""
self.verify_can_create_local_module()
self._can_create_local_module = False
self._usermsg.info("Making clean directory structure for %s",
self._module_path)
os.makedirs(self.abs_module_path)
# The reason why we have to change directory into the folder where the
# files are created is in order to remain compatible with
# makeBaseApp.pl, used for IOC and Support modules
os.chdir(self.abs_module_path)
self._module_template.create_files()
os.chdir(self._cwd)
repo = vcs_git.init_repo(self.abs_module_path)
vcs_git.stage_all_files_and_commit(repo, "Initial commit")
def get_print_message(self):
"""Prints a message to detail the user's next steps."""
return self._module_template.get_print_message()
def push_repo_to_remote(self):
"""Pushes the local repo to the remote server.
Note:
This will set `_can_push_repo_to_remote` and `_remote_repo_valid`
False in order to prevent the user calling this method twice in
succession.
Raises:
:class:`~dls_ade.exceptions.VerificationError`: Local repository \
cannot be pushed to remote.
:class:`~dls_ade.exceptions.VCSGitError`: If issue with adding a \
new remote and pushing.
"""
self.verify_can_push_repo_to_remote()
self._can_push_repo_to_remote = False
self._remote_repo_valid = False
vcs = self.server.create_new_local_repo(self._module_name, self._area,
self.abs_module_path)
vcs.add_new_remote_and_push(self._server_repo_path)
class ModuleCreatorWithApps(ModuleCreator):
"""Abstract class for the management of the creation of app-based modules.
Attributes:
_app_name: The name of the app for the new module.
This is a separate folder in each git repository, corresponding to
the newly created module.
Raises:
:class:`~dls_ade.exceptions.ArgumentError`: If 'app_name' not given \
as a keyword argument
"""
def __init__(self, module_path, area, module_template, **kwargs):
"""Initialise variables.
Args:
kwargs: Must include app_name.
"""
if 'app_name' not in kwargs:
raise ArgumentError("'app_name' must be provided as keyword "
"argument.")
super(ModuleCreatorWithApps, self).__init__(
module_path,
area,
module_template,
**kwargs
)
self._app_name = kwargs['app_name']
class ModuleCreatorAddAppToModule(ModuleCreatorWithApps):
"""Class for the management of adding a new App to an existing IOC module.
In an old-style module, a single module repository contains multiple IOC
apps. To maintain compatibility, this class exists for the creation of new
apps inside existing modules.
Note:
While the script is called dls_start_new_module, the original svn
script similarly created the new 'app_nameApp' folders in existing
svn 'modules'.
In keeping with the rest of the :class:`ModuleCreator` code, I
continue to use the word 'module' to refer to the git repository (local
or remote) in the documentation, and the 'app' to be the new IOC folder
'app_nameApp' created inside.
From the point of view of the user, however, the 'app_nameApp' folder
itself was considered the 'module', hence the confusing use of eg.
dls_start_new_module for the main script's name.
"""
def verify_remote_repo(self):
"""Verifies there are no name conflicts with the remote repository.
This checks whether or not there are any name conflicts between the
intended module and app names, and the modules that already exist on
the remote repository.
Sets the `_remote_repo_valid` boolean value to True if there are no
conflicts.
This method will fail (raise a VerificationError) if:
- There is no remote repository to clone from
- There is an app_name conflict with one of the remote
paths
Raises:
:class:`~dls_ade.exceptions.VerificationError`: If there is an \
issue with the remote repository.
:class:`~dls_ade.exceptions.RemoteRepoError`: If the given server \
path does not exist.
This should never be raised. There is a bug if it is!
"""
if self._remote_repo_valid:
return
if not self.server.is_server_repo(self._server_repo_path):
err_message = ("The path {path:s} does not exist on server, so "
"cannot clone from it")
err_message = err_message.format(path=self._server_repo_path)
raise VerificationError(err_message)
conflicting_path = self._check_if_remote_repo_has_app(
self._server_repo_path
)
if conflicting_path:
err_message = ("The repository {path:s} has an app that conflicts "
"with app name: {app_name:s}")
err_message = err_message.format(
path=self._server_repo_path,
app_name=self._app_name
)
raise VerificationError(err_message)
self._remote_repo_valid = True
def _check_if_remote_repo_has_app(self, remote_repo_path):
"""Checks if the remote repository contains an app_nameApp folder.
This checks whether or not there is already a folder with the name
"app_nameApp" on the remote repository with the given server
repository path.
Sets the `_remote_repo_valid` boolean value to True if there are no
conflicts.
Returns:
bool: True if app exists, False otherwise.
Raises:
:class:`~dls_ade.exceptions.RemoteRepoError`: If given repo path \
does not exist on server.
This should never be raised. There is a bug if it is!
:class:`~dls_ade.exceptions.VCSGitError`: Issue with the vcs_git \
function calls.
"""
if not self.server.is_server_repo(remote_repo_path):
# This should never get raised!
err_message = ("Remote repo {repo:s} does not exist. Cannot "
"clone to determine if there is an app_name "
"conflict with {app_name:s}")
err_message = err_message.format(repo=remote_repo_path,
app_name=self._app_name)
raise RemoteRepoError(err_message)
temp_dir = ""
exists = False
try:
repo = self.server.temp_clone(remote_repo_path).repo
temp_dir = repo.working_tree_dir
if os.path.exists(os.path.join(temp_dir, self._app_name + "App")):
exists = True
finally:
try:
if temp_dir:
shutil.rmtree(temp_dir)
except OSError:
pass
return exists
def create_local_module(self):
"""Creates the folder structure and files in a cloned git repository.
This will use the file creation specified in
:meth:`~dls_ade.module_template.ModuleTemplate.create_files`.
Raises:
:class:`~dls_ade.exceptions.ArgumentError`: From \
:meth:`~dls_ade.module_template.ModuleTemplate.create_files`
OSError: From \
:meth:`~dls_ade.module_template.ModuleTemplate.create_files`
:class:`~dls_ade.exceptions.VCSGitError`: From \
:func:`~dls_ade.vcs_git.stage_all_files_and_commit`
"""
self.verify_can_create_local_module()
self._can_create_local_module = False
self._usermsg.info("Cloning module to {}".format(self._module_path))
vcs = self.server.clone(self._server_repo_path, self.abs_module_path)
os.chdir(self.abs_module_path)
self._module_template.create_files()
os.chdir(self._cwd)
commit_message = ("Added app, {app_name:s}, to module.".format(
app_name=self._app_name
))
vcs_git.stage_all_files_and_commit(vcs.repo, commit_message)
def push_repo_to_remote(self):
"""Pushes the local repo to the remote server using remote 'origin'.
This will push the master branch of the local repository to the remote
server it was cloned from.
Raises:
:class:`~dls_ade.exceptions.VerificationError`: From \
:meth:`.verify_can_push_repo_to_remote`.
:class:`~dls_ade.exceptions.VCSGitError`: From \
:func:`~dls_ade.vcs_git.push_to_remote`
"""
self.verify_can_push_repo_to_remote()
self._can_push_repo_to_remote = False
vcs = self.server.create_new_local_repo(self._module_name, self._area,
self.abs_module_path)
vcs.push_to_remote()
| dls-controls/dls_ade | dls_ade/module_creator.py | Python | apache-2.0 | 17,511 |
from awxkit.api.resources import resources
from . import base
from . import page
class Setting(base.Base):
pass
page.register_page([resources.setting,
resources.settings_all,
resources.settings_authentication,
resources.settings_changed,
resources.settings_github,
resources.settings_github_org,
resources.settings_github_team,
resources.settings_google_oauth2,
resources.settings_jobs,
resources.settings_ldap,
resources.settings_radius,
resources.settings_saml,
resources.settings_system,
resources.settings_tacacsplus,
resources.settings_ui,
resources.settings_user,
resources.settings_user_defaults], Setting)
class Settings(page.PageList, Setting):
def get_endpoint(self, endpoint):
"""Helper method used to navigate to a specific settings endpoint.
(Pdb) settings_pg.get_endpoint('all')
"""
base_url = '{0}{1}/'.format(self.endpoint, endpoint)
return self.walk(base_url)
get_setting = get_endpoint
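    # Illustrative note (assumption, not from the original source): given a
    # Settings page list object, e.g. `settings_pg`, a specific settings group
    # can be reached through the helper above:
    #
    #     jobs_settings = settings_pg.get_endpoint('jobs')
    #     jobs_settings = settings_pg.get_setting('jobs')   # same via the alias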
page.register_page(resources.settings, Settings)
| GoogleCloudPlatform/sap-deployment-automation | third_party/github.com/ansible/awx/awxkit/awxkit/api/pages/settings.py | Python | apache-2.0 | 1,336 |
import json
import apiai
CLIENT_ACCESS_TOKEN = 'api key'
def nlu(mytext):
ai = apiai.ApiAI(CLIENT_ACCESS_TOKEN)
request = ai.text_request()
    request.lang = 'en'  # optional, the default value is 'en'
    # request.session_id = "<SESSION ID, UNIQUE FOR EACH USER>"
request.query = mytext
response = request.getresponse()
string = response.read().decode('utf-8')
json_obj = json.loads(string)
# {'hexagon': '', 'rhombus': '', 'circle': '', 'circle_direction': '', 'rect_direction': 'down', 'rhombus_direction': '', 'triangle': 'Triangle', 'triangle_direction': '', 'square_direction': '', 'square': '', 'hexagon_direction': '', 'rect': '', 'number': [10]}
# if json_obj['result']['action']=='x direction y by z units'
# for i in json_obj:
# print(i,"\t",json_obj[i])
# print(json_obj['result']['metadata']['intentName'])
# print(json_obj['result']['actionIncomplete'])
# print(json_obj['result']['parameters'])
if json_obj['result']['metadata']['intentName'] != 'x direction y by z units':
return json.dumps({"action":json_obj['result']['action'],"message":json_obj['result']['fulfillment']['speech'],'two_figures':False})
else:
        a = []
        di = json_obj['result']['parameters']
        for i in di:
            if di[i] == '':
                a.append(i)
        for i in a:
            del di[i]
        # di["two_figures"] = True
        result = {}
        result['by'] = di['number'][0]
        del di['number']
        # {'number': [10], 'triangle': 'Triangle', 'rect_direction': 'down'}
        for i in ['rect_direction', 'circle_direction', 'triangle_direction',
                  'rhombus_direction', 'hexagon_direction', 'square_direction']:
            if i in di:
                result['second'] = i.split('_')[0].lower()
                result['direction'] = di[i]
                del di[i]
        for i in di:
            result['first'] = di[i]
        result["two_figures"] = True
        print(result)
        return json.dumps(result)
# {'two_figures': True, 'first': 'rect', 'second': 'Triangle', 'by': 10, 'direction': 'down'}
# nlu("rectangle below triangle by 10 units")
| mzmmoazam/DrawBot | nlu.py | Python | apache-2.0 | 2,221 |
import argparse
import logging
import os
import sys
from mpi4py import MPI
from pandayoda.yodacore import Yoda
from pandayoda.yodaexe import Droid
logging.basicConfig(level=logging.DEBUG)
def main(globalWorkDir, localWorkDir):
comm = MPI.COMM_WORLD
mpirank = comm.Get_rank()
# Create separate working directory for each rank
from os.path import abspath as _abspath, join as _join
    curdir = _abspath(localWorkDir)
    wkdirname = "rank_%s" % str(mpirank)
    wkdir = _abspath(_join(curdir, wkdirname))
    if not os.path.exists(wkdir):
        os.makedirs(wkdir)
    os.chdir(wkdir)
    if mpirank == 0:
yoda = Yoda.Yoda(globalWorkDir, localWorkDir)
yoda.run()
else:
droid = Droid.Droid(globalWorkDir, localWorkDir)
droid.run()
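# Illustrative launch sketch (assumption, not part of the original file): under
# an MPI launcher, rank 0 runs the Yoda coordinator and every other rank runs a
# Droid worker, so a 4-rank job could be started roughly like:
#
#     mpirun -n 4 python HPCJob.py --globalWorkingDir=/scratch/panda/job123
#
# The directory above is a placeholder; --localWorkingDir falls back to the
# global working directory when it is not given.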
if __name__ == "__main__":
usage = """
usage: %(prog)s <command> [options] [args]
Commands:
help <command> Output help for one of the commands below
"""
oparser = argparse.ArgumentParser(prog=os.path.basename(sys.argv[0]), add_help=True)
oparser.add_argument('--globalWorkingDir', dest="globalWorkingDir", default=None, help="Global share working directory")
oparser.add_argument('--localWorkingDir', dest="localWorkingDir", default=None, help="Local working directory. if it's not set, it will use global working directory")
if len(sys.argv) == 1:
oparser.print_help()
sys.exit(-1)
args = oparser.parse_args(sys.argv[1:])
if args.globalWorkingDir is None:
print "Global working directory is needed."
oparser.print_help()
sys.exit(-1)
if args.localWorkingDir is None:
args.localWorkingDir = args.globalWorkingDir
try:
main(args.globalWorkingDir, args.localWorkingDir)
print "HPCJob-Yoda success"
sys.exit(0)
except Exception as e:
print "HPCJob-Yoda failed"
print(e)
sys.exit(1)
| RRCKI/pilot | HPC/HPCJob.py | Python | apache-2.0 | 1,933 |
# coding=utf-8
# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
import itertools
import os
import subprocess
from collections import OrderedDict
from hashlib import sha1
from twitter.common.collections import OrderedSet
from pants.backend.codegen.targets.java_protobuf_library import JavaProtobufLibrary
from pants.backend.codegen.tasks.protobuf_parse import ProtobufParse
from pants.backend.codegen.tasks.simple_codegen_task import SimpleCodegenTask
from pants.backend.jvm.targets.jar_library import JarLibrary
from pants.backend.jvm.targets.java_library import JavaLibrary
from pants.backend.jvm.tasks.jar_import_products import JarImportProducts
from pants.base.address import Address
from pants.base.build_environment import get_buildroot
from pants.base.exceptions import TaskError
from pants.base.source_root import SourceRoot
from pants.binaries.binary_util import BinaryUtil
from pants.fs.archive import ZIP
from pants.util.dirutil import safe_mkdir
from pants.util.memo import memoized_property
class ProtobufGen(SimpleCodegenTask):
@classmethod
def global_subsystems(cls):
return super(ProtobufGen, cls).global_subsystems() + (BinaryUtil.Factory,)
@classmethod
def register_options(cls, register):
super(ProtobufGen, cls).register_options(register)
# The protoc version and the plugin names are used as proxies for the identity of the protoc
# executable environment here. Although version is an obvious proxy for the protoc binary
# itself, plugin names are less so and plugin authors must include a version in the name for
# proper invalidation of protobuf products in the face of plugin modification that affects
# plugin outputs.
register('--version', advanced=True, fingerprint=True,
help='Version of protoc. Used to create the default --javadeps and as part of '
'the path to lookup the tool with --pants-support-baseurls and '
'--pants-bootstrapdir. When changing this parameter you may also need to '
'update --javadeps.',
default='2.4.1')
register('--plugins', advanced=True, fingerprint=True, action='append',
help='Names of protobuf plugins to invoke. Protoc will look for an executable '
'named protoc-gen-$NAME on PATH.',
default=[])
register('--extra_path', advanced=True, action='append',
help='Prepend this path onto PATH in the environment before executing protoc. '
'Intended to help protoc find its plugins.',
default=None)
register('--supportdir', advanced=True,
             help='Path to use for the protoc binary. Used as part of the path to lookup the '
'tool under --pants-bootstrapdir.',
default='bin/protobuf')
register('--javadeps', advanced=True, action='append',
help='Dependencies to bootstrap this task for generating java code. When changing '
'this parameter you may also need to update --version.',
default=['3rdparty:protobuf-java'])
# TODO https://github.com/pantsbuild/pants/issues/604 prep start
@classmethod
def prepare(cls, options, round_manager):
super(ProtobufGen, cls).prepare(options, round_manager)
round_manager.require_data(JarImportProducts)
round_manager.require_data('deferred_sources')
# TODO https://github.com/pantsbuild/pants/issues/604 prep finish
def __init__(self, *args, **kwargs):
"""Generates Java files from .proto files using the Google protobuf compiler."""
super(ProtobufGen, self).__init__(*args, **kwargs)
self.plugins = self.get_options().plugins
self._extra_paths = self.get_options().extra_path
@memoized_property
def protobuf_binary(self):
binary_util = BinaryUtil.Factory.create()
return binary_util.select_binary(self.get_options().supportdir,
self.get_options().version,
'protoc')
@property
def javadeps(self):
return self.resolve_deps(self.get_options().javadeps)
@property
def synthetic_target_type(self):
return JavaLibrary
def synthetic_target_extra_dependencies(self, target):
deps = OrderedSet()
if target.imported_jars:
# We need to add in the proto imports jars.
jars_address = Address(os.path.relpath(self.codegen_workdir(target), get_buildroot()),
target.id + '-rjars')
jars_target = self.context.add_new_target(jars_address,
JarLibrary,
jars=target.imported_jars,
derived_from=target)
deps.update([jars_target])
deps.update(self.javadeps)
return deps
def is_gentarget(self, target):
return isinstance(target, JavaProtobufLibrary)
@classmethod
def supported_strategy_types(cls):
return [cls.IsolatedCodegenStrategy, cls.ProtobufGlobalCodegenStrategy]
def sources_generated_by_target(self, target):
genfiles = []
for source in target.sources_relative_to_source_root():
path = os.path.join(target.target_base, source)
genfiles.extend(self.calculate_genfiles(path, source))
return genfiles
def execute_codegen(self, targets):
if not targets:
return
sources_by_base = self._calculate_sources(targets)
if self.codegen_strategy.name() == 'isolated':
sources = OrderedSet()
for target in targets:
sources.update(target.sources_relative_to_buildroot())
else:
sources = OrderedSet(itertools.chain.from_iterable(sources_by_base.values()))
if not self.validate_sources_present(sources, targets):
return
bases = OrderedSet(sources_by_base.keys())
bases.update(self._proto_path_imports(targets))
check_duplicate_conflicting_protos(self, sources_by_base, sources, self.context.log)
for target in targets:
# NB(gm): If the strategy is set to 'isolated', then 'targets' should contain only a single
# element, which means this simply sets the output directory depending on that element.
# If the strategy is set to 'global', the target passed in as a parameter here will be
# completely arbitrary, but that's OK because the codegen_workdir function completely
# ignores the target parameter when using a global strategy.
output_dir = self.codegen_workdir(target)
break
gen_flag = '--java_out'
safe_mkdir(output_dir)
gen = '{0}={1}'.format(gen_flag, output_dir)
args = [self.protobuf_binary, gen]
if self.plugins:
for plugin in self.plugins:
# TODO(Eric Ayers) Is it a good assumption that the generated source output dir is
# acceptable for all plugins?
args.append("--{0}_out={1}".format(plugin, output_dir))
for base in bases:
args.append('--proto_path={0}'.format(base))
args.extend(sources)
# Tack on extra path entries. These can be used to find protoc plugins
protoc_environ = os.environ.copy()
if self._extra_paths:
protoc_environ['PATH'] = os.pathsep.join(self._extra_paths
+ protoc_environ['PATH'].split(os.pathsep))
self.context.log.debug('Executing: {0}'.format('\\\n '.join(args)))
process = subprocess.Popen(args, env=protoc_environ)
result = process.wait()
if result != 0:
raise TaskError('{0} ... exited non-zero ({1})'.format(self.protobuf_binary, result))
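  # Illustrative sketch (assumption, not from the original source): for a single
  # proto root 'src/protobuf' and one source file, the command assembled above
  # is roughly equivalent to invoking:
  #
  #     protoc --java_out=<codegen_workdir> --proto_path=src/protobuf \
  #         src/protobuf/foo/bar.proto
  #
  # with each registered plugin adding a further --<plugin>_out=<codegen_workdir>
  # flag, and PATH optionally extended via --extra_path so protoc can find them.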
def _calculate_sources(self, targets):
gentargets = OrderedSet()
def add_to_gentargets(target):
if self.is_gentarget(target):
gentargets.add(target)
self.context.build_graph.walk_transitive_dependency_graph(
[target.address for target in targets],
add_to_gentargets,
postorder=True)
sources_by_base = OrderedDict()
# TODO(Eric Ayers) Extract this logic for general use? When using unpacked_jars it is needed
# to get the correct source root for paths outside the current BUILD tree.
for target in gentargets:
for source in target.sources_relative_to_buildroot():
base = SourceRoot.find_by_path(source)
if not base:
base, _ = target.target_base, target.sources_relative_to_buildroot()
self.context.log.debug('Could not find source root for {source}.'
' Missing call to SourceRoot.register()? Fell back to {base}.'
.format(source=source, base=base))
if base not in sources_by_base:
sources_by_base[base] = OrderedSet()
sources_by_base[base].add(source)
return sources_by_base
def _jars_to_directories(self, target):
"""Extracts and maps jars to directories containing their contents.
:returns: a set of filepaths to directories containing the contents of jar.
"""
files = set()
jar_import_products = self.context.products.get_data(JarImportProducts)
imports = jar_import_products.imports(target)
for coordinate, jar in imports:
files.add(self._extract_jar(coordinate, jar))
return files
def _extract_jar(self, coordinate, jar_path):
"""Extracts the jar to a subfolder of workdir/extracted and returns the path to it."""
with open(jar_path, 'rb') as f:
outdir = os.path.join(self.workdir, 'extracted', sha1(f.read()).hexdigest())
if not os.path.exists(outdir):
ZIP.extract(jar_path, outdir)
self.context.log.debug('Extracting jar {jar} at {jar_path}.'
.format(jar=coordinate, jar_path=jar_path))
else:
self.context.log.debug('Jar {jar} already extracted at {jar_path}.'
.format(jar=coordinate, jar_path=jar_path))
return outdir
def _proto_path_imports(self, proto_targets):
for target in proto_targets:
for path in self._jars_to_directories(target):
yield os.path.relpath(path, get_buildroot())
def calculate_genfiles(self, path, source):
protobuf_parse = ProtobufParse(path, source)
protobuf_parse.parse()
return OrderedSet(self.calculate_java_genfiles(protobuf_parse))
def calculate_java_genfiles(self, protobuf_parse):
basepath = protobuf_parse.package.replace('.', os.path.sep)
classnames = {protobuf_parse.outer_class_name}
if protobuf_parse.multiple_files:
classnames |= protobuf_parse.enums | protobuf_parse.messages | protobuf_parse.services | \
set(['{name}OrBuilder'.format(name=m) for m in protobuf_parse.messages])
for classname in classnames:
yield os.path.join(basepath, '{0}.java'.format(classname))
class ProtobufGlobalCodegenStrategy(SimpleCodegenTask.GlobalCodegenStrategy):
def find_sources(self, target):
return self._task.sources_generated_by_target(target)
def _same_contents(a, b):
"""Perform a comparison of the two files"""
with open(a, 'rb') as fp_a, open(b, 'rb') as fp_b:
return fp_a.read() == fp_b.read()
def check_duplicate_conflicting_protos(task, sources_by_base, sources, log):
"""Checks if proto files are duplicate or conflicting.
There are sometimes two files with the same name on the .proto path. This causes the protobuf
compiler to stop with an error. Some repos have legitimate cases for this, and so this task
decides to just choose one to keep the entire build from failing. Sometimes, they are identical
copies. That is harmless, but if there are two files with the same name with different contents,
that is ambiguous and we want to complain loudly.
:param task: provides an implementation of the method calculate_genfiles()
:param dict sources_by_base: mapping of base to path
:param set|OrderedSet sources: set of sources
:param Context.Log log: writes error messages to the console for conflicts
"""
sources_by_genfile = {}
for base in sources_by_base.keys(): # Need to iterate over /original/ bases.
for path in sources_by_base[base]:
      if path not in sources:
continue # Check to make sure we haven't already removed it.
source = path[len(base):]
genfiles = task.calculate_genfiles(path, source)
for genfile in genfiles:
if genfile in sources_by_genfile:
# Possible conflict!
prev = sources_by_genfile[genfile]
          if prev not in sources:
# Must have been culled by an earlier pass.
continue
if not _same_contents(path, prev):
log.error('Proto conflict detected (.proto files are different):\n'
'1: {prev}\n2: {curr}'.format(prev=prev, curr=path))
else:
log.warn('Proto duplication detected (.proto files are identical):\n'
'1: {prev}\n2: {curr}'.format(prev=prev, curr=path))
log.warn(' Arbitrarily favoring proto 1.')
if path in sources:
sources.remove(path) # Favor the first version.
continue
sources_by_genfile[genfile] = path
| kslundberg/pants | src/python/pants/backend/codegen/tasks/protobuf_gen.py | Python | apache-2.0 | 13,284 |
# -*- coding: utf-8 -*-
"""
Copyright 2010 cloudControl UG (haftungsbeschraenkt)
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import os
import sys
import json
from pycclib.cclib import GoneError, NotImplementedError, ForbiddenError
from pycclib.cclib import ConflictDuplicateError
from pycclib import cclib
from cctrl.error import PasswordsDontMatchException, InputErrorException, \
messages
from cctrl.auth import get_credentials, set_user_config, get_user_config
from cctrl.output import print_keys
from cctrl.common import get_email_and_password
from output import print_key
from oshelpers import readContentOf
from keyhelpers import is_key_valid, ask_user_to_use_default_ssh_public_key, \
create_new_default_ssh_keys, get_default_ssh_key_path
class UserController(object):
"""
This controller handles all user related actions.
"""
api = None
def __init__(self, api, settings):
self.api = api
self.settings = settings
def checktoken(self, args):
try:
self.api.read_users()
except cclib.TokenRequiredError:
sys.exit(1)
sys.exit(0)
def create(self, args):
"""
Create a new user.
"""
if not self.settings.user_registration_enabled:
print messages['RegisterDisabled'].format(self.settings.user_registration_url)
return
self.api.set_token(None)
if args.name and args.email and args.password:
name = args.name[0]
email = args.email[0]
password = args.password[0]
else:
name = raw_input('Username: ')
try:
email, password = get_credentials(self.settings, create=True)
except PasswordsDontMatchException:
return
try:
self.api.create_user(name, email, password)
except NotImplementedError:
raise InputErrorException('CommandNotImplemented')
print messages['UserCreatedNowCheckEmail']
def activate(self, args):
"""
Activate a new user using the information from the
activation email.
"""
self.api.set_token(None)
try:
self.api.update_user(
args.user_name[0],
activation_code=args.activation_code[0])
except GoneError:
raise InputErrorException('WrongUsername')
except NotImplementedError:
raise InputErrorException('CommandNotImplemented')
def delete(self, args):
"""
Delete your user account.
"""
users = self.api.read_users()
if not args.force_delete:
question = raw_input('Do you really want to delete your user? ' +
'Type "Yes" without the quotes to delete: ')
else:
question = 'Yes'
if question.lower() == 'yes':
try:
self.api.delete_user(users[0]['username'])
except NotImplementedError:
raise InputErrorException('CommandNotImplemented')
except ForbiddenError:
raise InputErrorException('DeleteAppsBeforeUser')
# After we have deleted our user we should also delete
# the token_file to avoid confusion
self.api.set_token(None)
else:
raise InputErrorException('SecurityQuestionDenied')
def addKey(self, args):
"""
Add a given public key to cloudControl user account.
"""
default_key_path = get_default_ssh_key_path()
# Possibility #1: User is providing a non-default SSH key
key_to_read = args.public_key
if not is_key_valid(key_to_read):
# Possibility #2: Try the default RSA public key
print >> sys.stderr, "Key '{0}' seems to not be a RSA public key or not found!".format(key_to_read)
ask_user_to_use_default_ssh_public_key()
# Possibility #3: All failed! Let's just create new keys for user!
if not is_key_valid(default_key_path):
if key_to_read != default_key_path:
print >> sys.stderr, "Default key '{0}' seems to not be a RSA public key or not found!".format(default_key_path)
create_new_default_ssh_keys()
# We've filtered all cases: the key must be the default one!
key_to_read = default_key_path
# Good, we have the key! Now, read the content of the key!
public_rsa_key_content = readContentOf(key_to_read)
# Add public RSA-key to cloudControl user account
try:
users = self.api.read_users()
self.api.create_user_key(
users[0]['username'],
public_rsa_key_content)
except ConflictDuplicateError:
raise InputErrorException('KeyDuplicate')
def listKeys(self, args):
"""
List your public keys.
"""
users = self.api.read_users()
if args.id:
key = self.api.read_user_key(users[0]['username'], args.id)
print_key(key)
else:
keys = self.api.read_user_keys(users[0]['username'])
print_keys(keys)
def removeKey(self, args):
"""
Remove one of your public keys specified by key_id.
listKeys() shows the key_ids.
"""
users = self.api.read_users()
if not args.force_delete:
question = raw_input('Do you really want to remove your key? ' +
'Type "Yes" without the quotes to remove: ')
else:
question = 'Yes'
if question.lower() == 'yes':
self.api.delete_user_key(users[0]['username'], args.id[0])
else:
raise InputErrorException('SecurityQuestionDenied')
def logout(self, args):
"""
Logout a user by deleting the token.json file.
"""
self.api.set_token(None)
def registerAddon(self, args):
file_content = readContentOf(args.manifest)
email, password = get_email_and_password(self.settings)
try:
self.api.register_addon(email, password, json.loads(file_content))
except cclib.UnauthorizedError:
sys.exit(messages['NotAuthorized'])
except cclib.ForbiddenError, e:
sys.exit(messages['NotAllowed'])
except cclib.ConnectionException:
sys.exit(messages['APIUnreachable'])
except Exception as e:
sys.exit(e)
def setup(self, args):
user_config = get_user_config(self.settings)
ssh_key_path = self._get_setup_ssh_key_path(user_config, args)
if not is_key_valid(ssh_key_path):
# If given key path is not default and does not exist
# we raise an error
if ssh_key_path != get_default_ssh_key_path():
raise InputErrorException('WrongPublicKey')
# If given key path was the default one, we create the key
# pair for the user
print >> sys.stderr, "Key '{0}' seems to not be a RSA public key or not found!".format(ssh_key_path)
create_new_default_ssh_keys()
ssh_key_content = readContentOf(ssh_key_path)
ssh_auth = self._get_setup_ssh_auth(self.settings, user_config, args)
if args.email:
set_user_config(self.settings, email=args.email)
try:
users = self.api.read_users()
self.api.create_user_key(
users[0]['username'],
ssh_key_content)
except ConflictDuplicateError:
# Key already added, nothing to do.
pass
set_user_config(self.settings,
ssh_auth=ssh_auth,
ssh_path=ssh_key_path)
def _get_setup_ssh_key_path(self, user_config, args):
if args.ssh_key_path:
return os.path.abspath(args.ssh_key_path)
if user_config.get('ssh_path'):
return user_config.get('ssh_path')
return get_default_ssh_key_path()
def _get_setup_ssh_auth(self, settings, user_config, args):
if not settings.ssh_auth:
return False
if args.ssh_auth:
return args.ssh_auth == 'yes'
return user_config.get('ssh_auth', True)
| cloudControl/cctrl | cctrl/user.py | Python | apache-2.0 | 8,933 |
# Copyright 2015 Metaswitch Networks
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from etcd import EtcdKeyNotFound, EtcdAlreadyExist, EtcdCompareFailed
from netaddr import IPAddress, IPNetwork
import socket
import logging
import random
from pycalico.datastore_datatypes import IPPool
from pycalico.datastore import DatastoreClient
from pycalico.datastore import (IPAM_HOST_AFFINITY_PATH,
IPAM_BLOCK_PATH,
IPAM_HANDLE_PATH)
from pycalico.datastore_errors import DataStoreError, PoolNotFound
from pycalico.block import (AllocationBlock,
get_block_cidr_for_address,
BLOCK_PREFIXLEN,
AlreadyAssignedError,
AddressNotAssignedError,
NoHostAffinityWarning)
from pycalico.handle import (AllocationHandle,
AddressCountTooLow)
from pycalico.util import get_hostname
_log = logging.getLogger(__name__)
_log.addHandler(logging.NullHandler())
RETRIES = 100
KEY_ERROR_RETRIES = 3
class BlockHandleReaderWriter(DatastoreClient):
"""
Can read and write allocation blocks and handles to the data store, as well
as related bits of state.
This class keeps etcd specific code from being in the main IPAMClient
class.
"""
def _read_block(self, block_cidr):
"""
Read the block from the data store.
:param block_cidr: The IPNetwork identifier for a block.
:return: An AllocationBlock object
"""
key = _block_datastore_key(block_cidr)
try:
# Use quorum=True to ensure we don't get stale reads. Without this
# we allow many subtle race conditions, such as creating a block,
# then later reading it and finding it doesn't exist.
result = self.etcd_client.read(key, quorum=True)
except EtcdKeyNotFound:
raise KeyError(str(block_cidr))
block = AllocationBlock.from_etcd_result(result)
return block
def _compare_and_swap_block(self, block):
"""
Write the block using an atomic Compare-and-swap.
"""
# If the block has a db_result, CAS against that.
if block.db_result is not None:
_log.debug("CAS Update block %s", block)
try:
self.etcd_client.update(block.update_result())
except EtcdCompareFailed:
raise CASError(str(block.cidr))
else:
_log.debug("CAS Write new block %s", block)
key = _block_datastore_key(block.cidr)
value = block.to_json()
try:
self.etcd_client.write(key, value, prevExist=False)
except EtcdAlreadyExist:
raise CASError(str(block.cidr))
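    # Illustrative usage pattern (assumption, not part of the original source):
    # callers wrap this compare-and-swap primitive in a read/modify/retry loop,
    # for example:
    #
    #     for _ in xrange(RETRIES):
    #         block = self._read_block(block_cidr)
    #         block.assign(address, handle_id, attributes)
    #         try:
    #             self._compare_and_swap_block(block)
    #             break                  # committed our view of the block
    #         except CASError:
    #             continue               # lost the race; re-read and retry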
def _get_affine_blocks(self, host, version, pool):
"""
Get the blocks for which this host has affinity.
:param host: The host name to get affinity for.
:param version: 4 for IPv4, 6 for IPv6.
:param pool: Limit blocks to a specific pool, or pass None to find all
blocks for the specified version.
"""
# Construct the path
path = IPAM_HOST_AFFINITY_PATH % {"hostname": host,
"version": version}
block_ids = []
try:
result = self.etcd_client.read(path, quorum=True).children
for child in result:
packed = child.key.split("/")
if len(packed) == 9:
# block_ids are encoded 192.168.1.0/24 -> 192.168.1.0-24
# in etcd.
block_ids.append(IPNetwork(packed[8].replace("-", "/")))
except EtcdKeyNotFound:
# Means the path is empty.
pass
# If pool specified, filter to only include ones in the pool.
if pool is not None:
assert isinstance(pool, IPPool)
block_ids = [cidr for cidr in block_ids if cidr in pool]
return block_ids
def _new_affine_block(self, host, version, pool):
"""
Create and register a new affine block for the host.
:param host: The host name to get a block for.
:param version: 4 for IPv4, 6 for IPv6.
:param pool: Limit blocks to a specific pool, or pass None to find all
blocks for the specified version.
:return: The block CIDR of the new block.
"""
# Get the pools and verify we got a valid one, or none.
ip_pools = self.get_ip_pools(version, ipam=True)
if pool is not None:
if pool not in ip_pools:
raise ValueError("Requested pool %s is not configured or has"
"wrong attributes" % pool)
# Confine search to only the one pool.
ip_pools = [pool]
for pool in ip_pools:
for block_cidr in pool.cidr.subnet(BLOCK_PREFIXLEN[version]):
block_id = str(block_cidr)
_log.debug("Checking if block %s is free.", block_id)
key = _block_datastore_key(block_cidr)
try:
_ = self.etcd_client.read(key, quorum=True)
except EtcdKeyNotFound:
_log.debug("Found block %s free.", block_id)
try:
self._claim_block_affinity(host, block_cidr)
except HostAffinityClaimedError:
# Failed to claim the block because some other host
# has it.
_log.debug("Failed to claim block %s", block_cidr)
continue
# Success!
return block_cidr
raise NoFreeBlocksError()
def _claim_block_affinity(self, host, block_cidr):
"""
Claim a block we think is free.
"""
block_id = str(block_cidr)
path = IPAM_HOST_AFFINITY_PATH % {"hostname": host,
"version": block_cidr.version}
key = path + block_id.replace("/", "-")
self.etcd_client.write(key, "")
# Create the block.
block = AllocationBlock(block_cidr, host)
try:
self._compare_and_swap_block(block)
except CASError:
# Block exists. Read it back to find out its host affinity
block = self._read_block(block_cidr)
if block.host_affinity == host:
# Block is now claimed by us. Some other process on this host
# must have claimed it.
_log.debug("Block %s already claimed by us. Success.",
block_cidr)
return
# Some other host beat us to claiming this block. Clean up.
self.etcd_client.delete(key)
# Throw a key error to let the caller know the block wasn't free
# after all.
raise HostAffinityClaimedError("Block %s already claimed by %s",
block_id, block.host_affinity)
# successfully created the block. Done.
return
def _random_blocks(self, excluded_ids, version, pool):
"""
Get an list of block CIDRs, in random order.
:param excluded_ids: List of IDs that should be excluded.
:param version: The IP version 4, or 6.
:param pool: IPPool to get blocks from, or None to use all pools
:return: An iterator of block CIDRs.
"""
# Get the pools and verify we got a valid one, or none.
ip_pools = self.get_ip_pools(version, ipam=True)
if pool is not None:
if pool not in ip_pools:
raise ValueError("Requested pool %s is not configured or has"
"wrong attributes" % pool)
# Confine search to only the one pool.
ip_pools = [pool]
random_blocks = []
i = 0
for pool in ip_pools:
for block_cidr in pool.cidr.subnet(BLOCK_PREFIXLEN[version]):
if block_cidr not in excluded_ids:
# add this block. We use an "inside-out" Fisher-Yates
# shuffle to randomize the list as we create it. See
# http://en.wikipedia.org/wiki/Fisher%E2%80%93Yates_shuffle
j = random.randint(0, i)
if j != i:
random_blocks.append(random_blocks[j])
random_blocks[j] = block_cidr
else:
random_blocks.append(block_cidr)
i += 1
return random_blocks
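    # Minimal standalone sketch of the "inside-out" Fisher-Yates shuffle used
    # above (illustration only, not part of the original source): each new item
    # is appended and then swapped with a uniformly random earlier slot, which
    # produces an unbiased permutation in a single pass:
    #
    #     shuffled = []
    #     for i, item in enumerate(items):
    #         j = random.randint(0, i)
    #         shuffled.append(shuffled[j] if j != i else item)
    #         if j != i:
    #             shuffled[j] = item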
def _increment_handle(self, handle_id, block_cidr, amount):
"""
Increment the allocation count on the given handle for the given block
by the given amount.
"""
for _ in xrange(RETRIES):
try:
handle = self._read_handle(handle_id)
except KeyError:
# handle doesn't exist. Create it.
handle = AllocationHandle(handle_id)
_ = handle.increment_block(block_cidr, amount)
try:
self._compare_and_swap_handle(handle)
except CASError:
# CAS failed. Retry.
continue
else:
# success!
return
raise RuntimeError("Max retries hit.") # pragma: no cover
def _decrement_handle(self, handle_id, block_cidr, amount):
"""
Decrement the allocation count on the given handle for the given block
by the given amount.
"""
for _ in xrange(RETRIES):
try:
handle = self._read_handle(handle_id)
except KeyError:
# This is bad. The handle doesn't exist, which means something
# really wrong has happened, like DB corruption.
_log.error("Can't decrement block %s on handle %s; it doesn't "
"exist.", str(block_cidr), handle_id)
raise
try:
handle.decrement_block(block_cidr, amount)
except AddressCountTooLow:
# This is also bad. The handle says it has fewer than the
# requested amount of addresses allocated on the block. This
# means the DB is corrupted.
_log.error("Can't decrement block %s on handle %s; too few "
"allocated.", str(block_cidr), handle_id)
raise
try:
self._compare_and_swap_handle(handle)
except CASError:
continue
else:
# Success!
return
raise RuntimeError("Max retries hit.") # pragma: no cover
def _read_handle(self, handle_id):
"""
Read the handle with the given handle ID from the data store.
:param handle_id: The handle ID to read.
:return: AllocationHandle object.
"""
key = _handle_datastore_key(handle_id)
try:
result = self.etcd_client.read(key, quorum=True)
except EtcdKeyNotFound:
raise KeyError(handle_id)
handle = AllocationHandle.from_etcd_result(result)
return handle
def _compare_and_swap_handle(self, handle):
"""
Write the handle using an atomic Compare-and-swap.
"""
# If the handle has a db_result, CAS against that.
if handle.db_result is not None:
_log.debug("Handle %s exists.", handle.handle_id)
if handle.is_empty():
# Handle is now empty. Delete it instead of an update.
_log.debug("Handle %s is empty.", handle.handle_id)
key = _handle_datastore_key(handle.handle_id)
try:
self.etcd_client.delete(
key,
prevIndex=handle.db_result.modifiedIndex)
except EtcdAlreadyExist:
raise CASError(handle.handle_id)
else:
_log.debug("Handle %s is not empty.", handle.handle_id)
try:
self.etcd_client.update(handle.update_result())
except EtcdCompareFailed:
raise CASError(handle.handle_id)
else:
_log.debug("CAS Write new handle %s", handle.handle_id)
assert not handle.is_empty(), "Don't write empty handle."
key = _handle_datastore_key(handle.handle_id)
value = handle.to_json()
try:
self.etcd_client.write(key, value, prevExist=False)
except EtcdAlreadyExist:
raise CASError(handle.handle_id)
class CASError(DataStoreError):
"""
Compare-and-swap atomic update failed.
"""
pass
class NoFreeBlocksError(DataStoreError):
"""
Tried to get a new block but there are none available.
"""
pass
class HostAffinityClaimedError(DataStoreError):
"""
Tried to set the host affinity of a block which already has a host that
claims affinity.
"""
pass
def _block_datastore_key(block_cidr):
"""
Translate a block_id into a datastore key.
:param block_cidr: IPNetwork representing the block
:return: etcd key as string.
"""
path = IPAM_BLOCK_PATH % {'version': block_cidr.version}
return path + str(block_cidr).replace("/", "-")
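# Illustrative example (derived from the code above; the exact prefix depends on
# IPAM_BLOCK_PATH): a v4 block CIDR is keyed with its slash replaced by a dash,
# e.g.
#
#     _block_datastore_key(IPNetwork("192.168.1.0/26"))
#     # -> (IPAM_BLOCK_PATH % {'version': 4}) + "192.168.1.0-26"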
def _handle_datastore_key(handle_id):
"""
Translate a handle_id into a datastore key.
:param handle_id: String key
:return: etcd key as string.
"""
return IPAM_HANDLE_PATH + handle_id
class IPAMClient(BlockHandleReaderWriter):
def auto_assign_ips(self, num_v4, num_v6, handle_id, attributes,
pool=(None, None), hostname=None):
"""
Automatically pick and assign the given number of IPv4 and IPv6
addresses.
:param num_v4: Number of IPv4 addresses to request
:param num_v6: Number of IPv6 addresses to request
:param handle_id: allocation handle ID for this request. You can query
this key using get_assignments_by_handle() or release all addresses
with this key using release_by_handle().
:param attributes: Contents of this dict will be stored with the
assignment and can be queried using get_assignment_attributes(). Must
be JSON serializable.
:param pool: (optional) tuple of (v4 pool, v6 pool); if supplied, the
pool(s) to assign from, If None, automatically choose a pool.
:param hostname: (optional) the hostname to use for affinity in
assigning IP addresses. Defaults to the hostname returned by get_hostname().
:return: A tuple of (v4_address_list, v6_address_list). When IPs in
configured pools are at or near exhaustion, this method may return
fewer than requested addresses.
"""
assert isinstance(handle_id, str) or handle_id is None
if not hostname:
hostname = get_hostname()
_log.info("Auto-assign %d IPv4, %d IPv6 addrs",
num_v4, num_v6)
v4_address_list = self._auto_assign(4, num_v4, handle_id, attributes,
pool[0], hostname)
_log.info("Auto-assigned IPv4s %s",
[str(addr) for addr in v4_address_list])
v6_address_list = self._auto_assign(6, num_v6, handle_id, attributes,
pool[1], hostname)
_log.info("Auto-assigned IPv6s %s",
[str(addr) for addr in v6_address_list])
return v4_address_list, v6_address_list
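    # Illustrative usage sketch (assumption, not part of the original source;
    # the handle ID and attributes below are placeholders):
    #
    #     client = IPAMClient()
    #     v4_ips, v6_ips = client.auto_assign_ips(
    #         num_v4=1, num_v6=0,
    #         handle_id="workload-1234",
    #         attributes={"owner": "example"})   # must be JSON serializable
    #     ...
    #     client.release_ip_by_handle("workload-1234")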
def _auto_assign(self, ip_version, num, handle_id,
attributes, pool, hostname):
"""
Auto assign addresses from a specific IP version.
Hosts automatically register themselves as the owner of a block the
first time they request an auto-assigned IP. For auto-assignment, a
host will allocate from a block it owns, or if all their currently
owned blocks get full, it will register itself as the owner of a new
block. If all blocks are owned, and all the host's own blocks are
full, it will pick blocks at random until it can fulfil the request.
If you're really, really out of addresses, it will fail the request.
:param ip_version: 4 or 6, the IP version number.
:param num: Number of addresses to assign.
:param handle_id: allocation handle ID for this request.
:param attributes: Contents of this dict will be stored with the
assignment and can be queried using get_assignment_attributes(). Must
be JSON serializable.
:param pool: (optional) if supplied, the pool to assign from, If None,
automatically choose a pool.
:param hostname: The hostname to use for affinity in assigning IP
addresses.
:return:
"""
assert isinstance(handle_id, str) or handle_id is None
block_list = self._get_affine_blocks(hostname,
ip_version,
pool)
block_ids = list(block_list)
key_errors = 0
allocated_ips = []
num_remaining = num
while num_remaining > 0:
try:
block_id = block_ids.pop(0)
except IndexError:
_log.info("Ran out of affine blocks for %s in pool %s",
hostname, pool)
break
try:
ips = self._auto_assign_block(block_id,
num_remaining,
handle_id,
attributes)
except KeyError:
# In certain rare race conditions, _get_affine_blocks above
# can return block_ids that don't exist (due to multiple IPAM
# clients on this host running simultaneously). If that
# happens, requeue the block_id for a retry, since we expect
# the other IPAM client to shortly create the block. To stop
# endless looping we limit the number of KeyErrors that will
# generate a retry.
_log.warning("Tried to auto-assign to block %s. Doesn't "
"exist.", block_id)
key_errors += 1
if key_errors <= KEY_ERROR_RETRIES:
_log.debug("Queueing block %s for retry.", block_id)
block_ids.append(block_id)
else:
_log.warning("Stopping retry of block %s.", block_id)
continue
except NoHostAffinityWarning:
# In certain rare race conditions, _get_affine_blocks above
# can return block_ids that don't actually have affinity to
# this host (due to multiple IPAM clients on this host running
# simultaneously). If that happens, just move to the next one.
_log.warning("No host affinity on block %s; skipping.",
block_id)
continue
allocated_ips.extend(ips)
num_remaining = num - len(allocated_ips)
# If there are still addresses to allocate, then we've run out of
# blocks with affinity. Try to fullfil address request by allocating
# new blocks.
retries = RETRIES
while num_remaining > 0 and retries > 0:
retries -= 1
try:
new_block = self._new_affine_block(hostname,
ip_version,
pool)
# If successful, this creates the block and registers it to us.
except NoFreeBlocksError:
_log.info("Could not get new host affinity block for %s in "
"pool %s", hostname, pool)
break
ips = self._auto_assign_block(new_block,
num_remaining,
handle_id,
attributes)
allocated_ips.extend(ips)
num_remaining = num - len(allocated_ips)
if retries == 0: # pragma: no cover
raise RuntimeError("Hit Max Retries.")
# If there are still addresses to allocate, we've now tried all blocks
# with some affinity to us, and tried (and failed) to allocate new
# ones. Our last option is a random hunt through any blocks we haven't
# yet tried.
if num_remaining > 0:
random_blocks = iter(self._random_blocks(block_list,
ip_version,
pool))
while num_remaining > 0:
try:
block_id = random_blocks.next()
except StopIteration:
_log.warning("All addresses exhausted in pool %s", pool)
break
ips = self._auto_assign_block(block_id,
num_remaining,
handle_id,
attributes,
affinity_check=False)
allocated_ips.extend(ips)
num_remaining = num - len(allocated_ips)
return allocated_ips
def _auto_assign_block(self, block_cidr, num, handle_id, attributes,
affinity_check=True):
"""
Automatically pick IPs from a block and commit them to the data store.
:param block_cidr: The identifier for the block to read.
:param num: The number of IPs to assign.
:param handle_id: allocation handle ID for this request.
:param attributes: Contents of this dict will be stored with the
assignment and can be queried using get_assignment_attributes(). Must
be JSON serializable.
:param affinity_check: True to enable checking the host has the
affinity to the block, False to disable this check, for example, while
randomly searching after failure to get affine block.
:return: List of assigned IPs.
"""
assert isinstance(handle_id, str) or handle_id is None
_log.debug("Auto-assigning from block %s", block_cidr)
for i in xrange(RETRIES):
_log.debug("Auto-assign from %s, retry %d", block_cidr, i)
block = self._read_block(block_cidr)
unconfirmed_ips = block.auto_assign(num=num,
handle_id=handle_id,
attributes=attributes,
affinity_check=affinity_check)
if len(unconfirmed_ips) == 0:
_log.debug("Block %s is full.", block_cidr)
return []
# If using a handle, increment the handle by the number of
# confirmed IPs.
if handle_id is not None:
self._increment_handle(handle_id,
block_cidr,
len(unconfirmed_ips))
try:
self._compare_and_swap_block(block)
except CASError:
_log.debug("CAS failed on block %s", block_cidr)
if handle_id is not None:
self._decrement_handle(handle_id,
block_cidr,
len(unconfirmed_ips))
else:
return unconfirmed_ips
raise RuntimeError("Hit Max Retries.")
def assign_ip(self, address, handle_id, attributes, hostname=None):
"""
Assign the given address. Throws AlreadyAssignedError if the address
is taken.
:param address: IPAddress to assign.
:param handle_id: allocation handle ID for this request. You can
query this key using get_assignments_by_handle() or release all
addresses with this handle_id using release_by_handle().
:param attributes: Contents of this dict will be stored with the
assignment and can be queried using get_assignment_attributes(). Must
be JSON serializable.
:param hostname: (optional) the hostname to use for affinity if the
block containing the IP address has no host affinity. Defaults to the
hostname returned by get_hostname().
:return: None.
"""
assert isinstance(handle_id, str) or handle_id is None
assert isinstance(address, IPAddress)
if not hostname:
hostname = get_hostname()
block_cidr = get_block_cidr_for_address(address)
for _ in xrange(RETRIES):
try:
block = self._read_block(block_cidr)
except KeyError:
_log.debug("Block %s doesn't exist.", block_cidr)
pools = self.get_ip_pools(address.version, ipam=True)
if any([address in pool for pool in pools]):
_log.debug("Create and claim block %s.",
block_cidr)
try:
self._claim_block_affinity(hostname,
block_cidr)
except HostAffinityClaimedError:
_log.debug("Someone else claimed block %s before us.",
block_cidr)
continue
# Block exists now, retry writing to it.
_log.debug("Claimed block %s", block_cidr)
continue
else:
raise ValueError("%s is not in any configured pool" %
address)
# Try to assign. Throws exception if already assigned -- let it.
block.assign(address, handle_id, attributes)
# If using a handle, increment by one IP
if handle_id is not None:
self._increment_handle(handle_id, block_cidr, 1)
# Try to commit.
try:
self._compare_and_swap_block(block)
return # Success!
except CASError:
_log.debug("CAS failed on block %s", block_cidr)
if handle_id is not None:
self._decrement_handle(handle_id,
block_cidr,
1)
raise RuntimeError("Hit max retries.")
def release_ips(self, addresses):
"""
Release the given addresses.
:param addresses: Set of IPAddresses to release (ok to mix IPv4 and
IPv6).
:return: Set of addresses that were already unallocated.
"""
assert isinstance(addresses, (set, frozenset))
_log.info("Releasing addresses %s", [str(addr) for addr in addresses])
unallocated = set()
# sort the addresses into blocks
addrs_by_block = {}
for address in addresses:
block_cidr = get_block_cidr_for_address(address)
addrs = addrs_by_block.setdefault(block_cidr, set())
addrs.add(address)
# loop through blocks, CAS releasing.
for block_cidr, addresses in addrs_by_block.iteritems():
unalloc_block = self._release_block(block_cidr, addresses)
unallocated = unallocated.union(unalloc_block)
return unallocated
def _release_block(self, block_cidr, addresses):
"""
Release the given addresses from the block, using compare-and-swap to
write the block.
:param block_cidr: IPNetwork identifying the block
:param addresses: List of addresses to release.
:return: List of addresses that were already unallocated.
"""
_log.debug("Releasing %d adddresses from block %s",
len(addresses), block_cidr)
for _ in xrange(RETRIES):
try:
block = self._read_block(block_cidr)
except KeyError:
_log.debug("Block %s doesn't exist.", block_cidr)
# OK to return, all addresses must be released already.
return addresses
(unallocated, handles) = block.release(addresses)
assert len(unallocated) <= len(addresses)
if len(unallocated) == len(addresses):
# All the addresses are already unallocated.
return addresses
# Try to commit
try:
self._compare_and_swap_block(block)
except CASError:
continue
else:
# Success! Decrement handles.
for handle_id, amount in handles.iteritems():
if handle_id is not None:
# Skip the None handle, it's a special value meaning
# the addresses were not allocated with a handle.
self._decrement_handle(handle_id, block_cidr, amount)
return unallocated
raise RuntimeError("Hit Max retries.") # pragma: no cover
def get_ip_assignments_by_handle(self, handle_id):
"""
Return a list of IPAddresses assigned to the key.
:param handle_id: Key to query e.g. used on assign_ip() or
auto_assign_ips().
:return: List of IPAddresses
"""
assert isinstance(handle_id, str)
handle = self._read_handle(handle_id) # Can throw KeyError, let it.
ip_assignments = []
for block_str in handle.block:
block_cidr = IPNetwork(block_str)
try:
block = self._read_block(block_cidr)
except KeyError:
_log.warning("Couldn't read block %s referenced in handle %s.",
block_str, handle_id)
continue
ips = block.get_ip_assignments_by_handle(handle_id)
ip_assignments.extend(ips)
return ip_assignments
def release_ip_by_handle(self, handle_id):
"""
Release all addresses assigned to the key.
:param handle_id: Key to query, e.g. used on assign_ip() or
auto_assign_ips().
:return: None.
"""
assert isinstance(handle_id, str)
handle = self._read_handle(handle_id) # Can throw KeyError, let it.
# Loop through blocks, releasing.
for block_str in handle.block:
block_cidr = IPNetwork(block_str)
self._release_ip_by_handle_block(handle_id, block_cidr)
def _release_ip_by_handle_block(self, handle_id, block_cidr):
"""
        Release all addresses in a block with the given handle ID.
:param handle_id: The handle ID to find addresses with.
:param block_cidr: The block to release addresses on.
:return: None
"""
for _ in xrange(RETRIES):
try:
block = self._read_block(block_cidr)
except KeyError:
# Block doesn't exist, so all addresses are already
# unallocated. This can happen if the handle is overestimating
# the number of assigned addresses, which is a transient, but
# expected condition.
return
num_release = block.release_by_handle(handle_id)
if num_release == 0:
                # Block didn't have any addresses with this handle, so all
                # addresses are already unallocated. This can happen if
# the handle is overestimating the number of assigned
# addresses, which is a transient, but expected condition.
return
try:
self._compare_and_swap_block(block)
except CASError:
# Failed to update, retry.
continue
# Successfully updated block, update the handle if necessary.
if handle_id is not None:
# Skip the None handle, it's a special value meaning
# the addresses were not allocated with a handle.
self._decrement_handle(handle_id, block_cidr, num_release)
return
raise RuntimeError("Hit Max retries.") # pragma: no cover
def get_assignment_attributes(self, address):
"""
Return the attributes of a given address.
:param address: IPAddress to query.
:return: The attributes for the address as passed to auto_assign() or
assign().
"""
assert isinstance(address, IPAddress)
block_cidr = get_block_cidr_for_address(address)
try:
block = self._read_block(block_cidr)
except KeyError:
_log.warning("Couldn't read block %s for requested address %s",
block_cidr, address)
raise AddressNotAssignedError("%s is not assigned." % address)
else:
_, attributes = block.get_attributes_for_ip(address)
return attributes
def assign_address(self, pool, address):
"""
Deprecated in favor of assign_ip().
Attempt to assign an IPAddress in a pool.
Fails if the address is already assigned.
The directory for storing assignments in this pool must already exist.
:param IPPool or IPNetwork pool: The pool that the assignment is from.
If pool is None, get the pool from datastore
:param IPAddress address: The address to assign.
:return: True if the allocation succeeds, false otherwise. An
exception is thrown for any error conditions.
:rtype: bool
"""
pool = pool or self.get_pool(address)
if pool is None:
raise PoolNotFound("IP address %s does not belong to any "
"configured pools" % address)
if isinstance(pool, IPPool):
pool = pool.cidr
assert isinstance(pool, IPNetwork)
assert isinstance(address, IPAddress)
try:
self.assign_ip(address, None, {})
return True
except AlreadyAssignedError:
return False
# Other exceptions indicate error conditions.
def unassign_address(self, pool, address):
"""
Deprecated in favor of release_ips()
Unassign an IP from a pool.
:param IPPool or IPNetwork pool: The pool that the assignment is from.
If the pool is None, get the pool from datastore
:param IPAddress address: The address to unassign.
:return: True if the address was unassigned, false otherwise. An
exception is thrown for any error conditions.
:rtype: bool
"""
pool = pool or self.get_pool(address)
if pool is None:
raise PoolNotFound("IP address %s does not belong to any "
"configured pools" % address)
if isinstance(pool, IPPool):
pool = pool.cidr
assert isinstance(pool, IPNetwork)
assert isinstance(address, IPAddress)
err = self.release_ips({address})
if err:
return False
else:
return True
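# --- Editor's illustrative sketch (hedged; not part of the original module) ---
# Every mutation above (assign_ip, _release_block, _release_ip_by_handle_block)
# follows the same optimistic-concurrency loop: read a block, mutate the local
# copy, attempt a compare-and-swap write, and retry on CASError up to RETRIES
# times before giving up. The self-contained toy below mirrors that control
# flow against an in-memory store; ToyStore and ToyCASError are hypothetical
# names used only for illustration.
if __name__ == "__main__":  # pragma: no cover
    class ToyCASError(Exception):
        """Raised when the stored version moved underneath the writer."""

    class ToyStore(object):
        """Holds a value plus a version; writes must name the version read."""
        def __init__(self, value):
            self.value, self.version = value, 0

        def read(self):
            return self.value, self.version

        def compare_and_swap(self, new_value, expected_version):
            if expected_version != self.version:
                raise ToyCASError("stale write")
            self.value, self.version = new_value, self.version + 1

    store = ToyStore(frozenset())

    def toy_assign(address, retries=4):
        for _ in range(retries):
            assigned, version = store.read()          # 1. read current state
            updated = set(assigned)
            updated.add(address)                      # 2. mutate local copy
            try:
                store.compare_and_swap(frozenset(updated), version)  # 3. CAS
                return                                # success
            except ToyCASError:
                continue                              # lost the race; retry
        raise RuntimeError("Hit max retries.")

    toy_assign("192.168.0.1")
    print(store.read())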
| alexhersh/libcalico | calico_containers/pycalico/ipam.py | Python | apache-2.0 | 36358 |
# Copyright (c) 2016 Cisco Systems
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import click
from aim import config as aim_cfg
from aim import context
from aim.db import api
from aim.tools.cli.groups import aimcli
@aimcli.aim.group(name='config')
@click.pass_context
def config(ctx):
aim_ctx = context.AimContext(store=api.get_store(expire_on_commit=True))
ctx.obj['manager'] = aim_cfg.ConfigManager(aim_ctx, '')
@config.command(name='update')
@click.argument('host', required=False)
@click.pass_context
def update(ctx, host):
"""Current database version."""
host = host or ''
ctx.obj['manager'].to_db(ctx.obj['conf'], host=host)
@config.command(name='replace')
@click.argument('host', required=False)
@click.pass_context
def replace(ctx, host):
"""Used for upgrading database."""
host = host or ''
ctx.obj['manager'].replace_all(ctx.obj['conf'], host=host)
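# Editor's note (hedged): assuming the installed `aim` entry point exposes the
# `aimcli.aim` group that `config` attaches to above, the subcommands would be
# invoked roughly as:
#   aim config update [HOST]   -> writes ctx.obj['conf'] to the DB via to_db()
#   aim config replace [HOST]  -> rewrites stored config via replace_all()
# HOST is optional; both commands fall back to host='' when it is omitted.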
| noironetworks/aci-integration-module | aim/tools/cli/commands/config.py | Python | apache-2.0 | 1450 |
# Foremast - Pipeline Tooling
#
# Copyright 2018 Gogo, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from .api_gateway_event import *
| gogoair/foremast | src/foremast/awslambda/api_gateway_event/__init__.py | Python | apache-2.0 | 661 |
import unittest
from os import path
from API.directoryscanner import find_runs_in_directory
path_to_module = path.abspath(path.dirname(__file__))
class TestDirectoryScanner(unittest.TestCase):
def test_sample_names_spaces(self):
runs = find_runs_in_directory(path.join(path_to_module, "sample-names-with-spaces"))
self.assertEqual(1, len(runs))
samples = runs[0].sample_list
self.assertEqual(3, len(samples))
for sample in samples:
self.assertEqual(sample.get_id(), sample.get_id().strip())
def test_single_end(self):
runs = find_runs_in_directory(path.join(path_to_module, "single_end"))
self.assertEqual(1, len(runs))
self.assertEqual("SINGLE_END", runs[0].metadata["layoutType"])
samples = runs[0].sample_list
self.assertEqual(3, len(samples))
for sample in samples:
self.assertFalse(sample.is_paired_end())
def test_completed_upload(self):
runs = find_runs_in_directory(path.join(path_to_module, "completed"))
self.assertEqual(0, len(runs))
def test_find_sample_sheet_name_variations(self):
runs = find_runs_in_directory(path.join(path_to_module, "sample-sheet-name-variations"))
self.assertEqual(1, len(runs))
| phac-nml/irida-miseq-uploader | Tests/unitTests/test_directoryscanner.py | Python | apache-2.0 | 1282 |
#!/usr/bin/env python
"""Messages are written to stdout instead of the normal stderr.
"""
import logging
from themelog import init_log
logger = logging.getLogger()
init_log(stdout=True)
logger.debug('This is a debug message')
logger.info('This is a info message')
logger.warning('This is a warning message')
logger.error('This is a error message')
logger.critical('This is a critical message')
| mjem/themelog | tests/test_stdout.py | Python | apache-2.0 | 395 |
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
__all__ = [
'ZerigoDNSDriver'
]
import copy
import base64
from libcloud.utils.py3 import httplib
from libcloud.utils.py3 import b
from xml.etree import ElementTree as ET
from libcloud.utils.misc import merge_valid_keys, get_new_obj
from libcloud.utils.xml import findtext, findall
from libcloud.common.base import XmlResponse, ConnectionUserAndKey
from libcloud.common.types import InvalidCredsError, LibcloudError
from libcloud.common.types import MalformedResponseError
from libcloud.dns.types import Provider, RecordType
from libcloud.dns.types import ZoneDoesNotExistError, RecordDoesNotExistError
from libcloud.dns.base import DNSDriver, Zone, Record
API_HOST = 'ns.zerigo.com'
API_VERSION = '1.1'
API_ROOT = '/api/%s/' % (API_VERSION)
VALID_ZONE_EXTRA_PARAMS = ['notes', 'tag-list', 'ns1', 'slave-nameservers']
VALID_RECORD_EXTRA_PARAMS = ['notes', 'ttl', 'priority']
# Number of items per page (maximum limit is 1000)
ITEMS_PER_PAGE = 100
class ZerigoError(LibcloudError):
def __init__(self, code, errors):
self.code = code
self.errors = errors or []
def __str__(self):
return 'Errors: %s' % (', '.join(self.errors))
def __repr__(self):
return ('<ZerigoError response code=%s, errors count=%s>' % (
self.code, len(self.errors)))
class ZerigoDNSResponse(XmlResponse):
def success(self):
return self.status in [httplib.OK, httplib.CREATED, httplib.ACCEPTED]
def parse_error(self):
status = int(self.status)
if status == 401:
if not self.body:
raise InvalidCredsError(str(self.status) + ': ' + self.error)
else:
raise InvalidCredsError(self.body)
elif status == 404:
context = self.connection.context
if context['resource'] == 'zone':
raise ZoneDoesNotExistError(value='', driver=self,
zone_id=context['id'])
elif context['resource'] == 'record':
raise RecordDoesNotExistError(value='', driver=self,
record_id=context['id'])
elif status != 503:
try:
body = ET.XML(self.body)
except:
raise MalformedResponseError('Failed to parse XML',
body=self.body)
errors = []
for error in findall(element=body, xpath='error'):
errors.append(error.text)
raise ZerigoError(code=status, errors=errors)
return self.body
class ZerigoDNSConnection(ConnectionUserAndKey):
host = API_HOST
secure = True
responseCls = ZerigoDNSResponse
def add_default_headers(self, headers):
auth_b64 = base64.b64encode(b('%s:%s' % (self.user_id, self.key)))
headers['Authorization'] = 'Basic %s' % (auth_b64.decode('utf-8'))
return headers
def request(self, action, params=None, data='', headers=None,
method='GET'):
if not headers:
headers = {}
if not params:
params = {}
if method in ("POST", "PUT"):
headers = {'Content-Type': 'application/xml; charset=UTF-8'}
return super(ZerigoDNSConnection, self).request(action=action,
params=params,
data=data,
method=method,
headers=headers)
class ZerigoDNSDriver(DNSDriver):
type = Provider.ZERIGO
name = 'Zerigo DNS'
website = 'http://www.zerigo.com/'
connectionCls = ZerigoDNSConnection
RECORD_TYPE_MAP = {
RecordType.A: 'A',
RecordType.AAAA: 'AAAA',
RecordType.CNAME: 'CNAME',
RecordType.MX: 'MX',
RecordType.REDIRECT: 'REDIRECT',
RecordType.TXT: 'TXT',
RecordType.SRV: 'SRV',
RecordType.NAPTR: 'NAPTR',
RecordType.NS: 'NS',
RecordType.PTR: 'PTR',
RecordType.SPF: 'SPF',
RecordType.GEO: 'GEO',
RecordType.URL: 'URL',
}
def iterate_zones(self):
return self._get_more('zones')
def iterate_records(self, zone):
return self._get_more('records', zone=zone)
def get_zone(self, zone_id):
path = API_ROOT + 'zones/%s.xml' % (zone_id)
self.connection.set_context({'resource': 'zone', 'id': zone_id})
data = self.connection.request(path).object
zone = self._to_zone(elem=data)
return zone
def get_record(self, zone_id, record_id):
zone = self.get_zone(zone_id=zone_id)
self.connection.set_context({'resource': 'record', 'id': record_id})
path = API_ROOT + 'hosts/%s.xml' % (record_id)
data = self.connection.request(path).object
record = self._to_record(elem=data, zone=zone)
return record
def create_zone(self, domain, type='master', ttl=None, extra=None):
"""
Create a new zone.
Provider API docs:
https://www.zerigo.com/docs/apis/dns/1.1/zones/create
@inherits: :class:`DNSDriver.create_zone`
"""
path = API_ROOT + 'zones.xml'
zone_elem = self._to_zone_elem(domain=domain, type=type, ttl=ttl,
extra=extra)
data = self.connection.request(action=path,
data=ET.tostring(zone_elem),
method='POST').object
zone = self._to_zone(elem=data)
return zone
def update_zone(self, zone, domain=None, type=None, ttl=None, extra=None):
"""
Update an existing zone.
Provider API docs:
https://www.zerigo.com/docs/apis/dns/1.1/zones/update
@inherits: :class:`DNSDriver.update_zone`
"""
if domain:
raise LibcloudError('Domain cannot be changed', driver=self)
path = API_ROOT + 'zones/%s.xml' % (zone.id)
zone_elem = self._to_zone_elem(domain=domain, type=type, ttl=ttl,
extra=extra)
response = self.connection.request(action=path,
data=ET.tostring(zone_elem),
method='PUT')
assert response.status == httplib.OK
merged = merge_valid_keys(params=copy.deepcopy(zone.extra),
valid_keys=VALID_ZONE_EXTRA_PARAMS,
extra=extra)
updated_zone = get_new_obj(obj=zone, klass=Zone,
attributes={'type': type,
'ttl': ttl,
'extra': merged})
return updated_zone
def create_record(self, name, zone, type, data, extra=None):
"""
Create a new record.
Provider API docs:
https://www.zerigo.com/docs/apis/dns/1.1/hosts/create
@inherits: :class:`DNSDriver.create_record`
"""
path = API_ROOT + 'zones/%s/hosts.xml' % (zone.id)
record_elem = self._to_record_elem(name=name, type=type, data=data,
extra=extra)
response = self.connection.request(action=path,
data=ET.tostring(record_elem),
method='POST')
assert response.status == httplib.CREATED
record = self._to_record(elem=response.object, zone=zone)
return record
def update_record(self, record, name=None, type=None, data=None,
extra=None):
path = API_ROOT + 'hosts/%s.xml' % (record.id)
record_elem = self._to_record_elem(name=name, type=type, data=data,
extra=extra)
response = self.connection.request(action=path,
data=ET.tostring(record_elem),
method='PUT')
assert response.status == httplib.OK
merged = merge_valid_keys(params=copy.deepcopy(record.extra),
valid_keys=VALID_RECORD_EXTRA_PARAMS,
extra=extra)
updated_record = get_new_obj(obj=record, klass=Record,
attributes={'type': type,
'data': data,
'extra': merged})
return updated_record
def delete_zone(self, zone):
path = API_ROOT + 'zones/%s.xml' % (zone.id)
self.connection.set_context({'resource': 'zone', 'id': zone.id})
response = self.connection.request(action=path, method='DELETE')
return response.status == httplib.OK
def delete_record(self, record):
path = API_ROOT + 'hosts/%s.xml' % (record.id)
self.connection.set_context({'resource': 'record', 'id': record.id})
response = self.connection.request(action=path, method='DELETE')
return response.status == httplib.OK
def ex_get_zone_by_domain(self, domain):
"""
Retrieve a zone object by the domain name.
:param domain: The domain which should be used
:type domain: ``str``
:rtype: :class:`Zone`
"""
path = API_ROOT + 'zones/%s.xml' % (domain)
self.connection.set_context({'resource': 'zone', 'id': domain})
data = self.connection.request(path).object
zone = self._to_zone(elem=data)
return zone
def ex_force_slave_axfr(self, zone):
"""
Force a zone transfer.
:param zone: Zone which should be used.
:type zone: :class:`Zone`
:rtype: :class:`Zone`
"""
path = API_ROOT + 'zones/%s/force_slave_axfr.xml' % (zone.id)
self.connection.set_context({'resource': 'zone', 'id': zone.id})
response = self.connection.request(path, method='POST')
assert response.status == httplib.ACCEPTED
return zone
def _to_zone_elem(self, domain=None, type=None, ttl=None, extra=None):
zone_elem = ET.Element('zone', {})
if domain:
domain_elem = ET.SubElement(zone_elem, 'domain')
domain_elem.text = domain
if type:
ns_type_elem = ET.SubElement(zone_elem, 'ns-type')
if type == 'master':
ns_type_elem.text = 'pri_sec'
elif type == 'slave':
if not extra or 'ns1' not in extra:
raise LibcloudError('ns1 extra attribute is required ' +
'when zone type is slave', driver=self)
ns_type_elem.text = 'sec'
ns1_elem = ET.SubElement(zone_elem, 'ns1')
ns1_elem.text = extra['ns1']
elif type == 'std_master':
# TODO: Each driver should provide supported zone types
# Slave name servers are elsewhere
if not extra or 'slave-nameservers' not in extra:
raise LibcloudError('slave-nameservers extra ' +
                                        'attribute is required when zone ' +
'type is std_master', driver=self)
ns_type_elem.text = 'pri'
slave_nameservers_elem = ET.SubElement(zone_elem,
'slave-nameservers')
slave_nameservers_elem.text = extra['slave-nameservers']
if ttl:
default_ttl_elem = ET.SubElement(zone_elem, 'default-ttl')
default_ttl_elem.text = str(ttl)
if extra and 'tag-list' in extra:
tags = extra['tag-list']
tags_elem = ET.SubElement(zone_elem, 'tag-list')
tags_elem.text = ' '.join(tags)
return zone_elem
def _to_record_elem(self, name=None, type=None, data=None, extra=None):
record_elem = ET.Element('host', {})
if name:
name_elem = ET.SubElement(record_elem, 'hostname')
name_elem.text = name
if type is not None:
type_elem = ET.SubElement(record_elem, 'host-type')
type_elem.text = self.RECORD_TYPE_MAP[type]
if data:
data_elem = ET.SubElement(record_elem, 'data')
data_elem.text = data
if extra:
if 'ttl' in extra:
ttl_elem = ET.SubElement(record_elem, 'ttl',
{'type': 'integer'})
ttl_elem.text = str(extra['ttl'])
if 'priority' in extra:
# Only MX and SRV records support priority
priority_elem = ET.SubElement(record_elem, 'priority',
{'type': 'integer'})
priority_elem.text = str(extra['priority'])
if 'notes' in extra:
notes_elem = ET.SubElement(record_elem, 'notes')
notes_elem.text = extra['notes']
return record_elem
def _to_zones(self, elem):
zones = []
for item in findall(element=elem, xpath='zone'):
zone = self._to_zone(elem=item)
zones.append(zone)
return zones
def _to_zone(self, elem):
id = findtext(element=elem, xpath='id')
domain = findtext(element=elem, xpath='domain')
type = findtext(element=elem, xpath='ns-type')
type = 'master' if type.find('pri') == 0 else 'slave'
ttl = findtext(element=elem, xpath='default-ttl')
hostmaster = findtext(element=elem, xpath='hostmaster')
custom_ns = findtext(element=elem, xpath='custom-ns')
custom_nameservers = findtext(element=elem, xpath='custom-nameservers')
notes = findtext(element=elem, xpath='notes')
nx_ttl = findtext(element=elem, xpath='nx-ttl')
slave_nameservers = findtext(element=elem, xpath='slave-nameservers')
tags = findtext(element=elem, xpath='tag-list')
tags = tags.split(' ') if tags else []
extra = {'hostmaster': hostmaster, 'custom-ns': custom_ns,
'custom-nameservers': custom_nameservers, 'notes': notes,
'nx-ttl': nx_ttl, 'slave-nameservers': slave_nameservers,
'tags': tags}
zone = Zone(id=str(id), domain=domain, type=type, ttl=int(ttl),
driver=self, extra=extra)
return zone
def _to_records(self, elem, zone):
records = []
for item in findall(element=elem, xpath='host'):
record = self._to_record(elem=item, zone=zone)
records.append(record)
return records
def _to_record(self, elem, zone):
id = findtext(element=elem, xpath='id')
name = findtext(element=elem, xpath='hostname')
type = findtext(element=elem, xpath='host-type')
type = self._string_to_record_type(type)
data = findtext(element=elem, xpath='data')
notes = findtext(element=elem, xpath='notes', no_text_value=None)
state = findtext(element=elem, xpath='state', no_text_value=None)
fqdn = findtext(element=elem, xpath='fqdn', no_text_value=None)
priority = findtext(element=elem, xpath='priority', no_text_value=None)
ttl = findtext(element=elem, xpath='ttl', no_text_value=None)
if not name:
name = None
if ttl:
ttl = int(ttl)
extra = {'notes': notes, 'state': state, 'fqdn': fqdn,
'priority': priority, 'ttl': ttl}
record = Record(id=id, name=name, type=type, data=data,
zone=zone, driver=self, extra=extra)
return record
def _get_more(self, rtype, **kwargs):
exhausted = False
last_key = None
while not exhausted:
items, last_key, exhausted = self._get_data(rtype, last_key,
**kwargs)
for item in items:
yield item
def _get_data(self, rtype, last_key, **kwargs):
# Note: last_key in this case really is a "last_page".
# TODO: Update base driver and change last_key to something more
# generic - e.g. marker
params = {}
params['per_page'] = ITEMS_PER_PAGE
params['page'] = last_key + 1 if last_key else 1
if rtype == 'zones':
path = API_ROOT + 'zones.xml'
response = self.connection.request(path)
transform_func = self._to_zones
elif rtype == 'records':
zone = kwargs['zone']
path = API_ROOT + 'zones/%s/hosts.xml' % (zone.id)
self.connection.set_context({'resource': 'zone', 'id': zone.id})
response = self.connection.request(path, params=params)
transform_func = self._to_records
exhausted = False
result_count = int(response.headers.get('x-query-count', 0))
if (params['page'] * ITEMS_PER_PAGE) >= result_count:
exhausted = True
if response.status == httplib.OK:
items = transform_func(elem=response.object, **kwargs)
return items, params['page'], exhausted
else:
return [], None, True
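# --- Editor's illustrative sketch (hedged; not part of the original driver) ---
# A minimal end-to-end use of the driver defined above. The credentials,
# domain and record data are placeholders, and the calls only make sense
# against a live Zerigo account, so the block is guarded from import-time
# execution.
if __name__ == "__main__":  # pragma: no cover
    driver = ZerigoDNSDriver("user@example.com", "zerigo-api-key")

    # Zones are yielded lazily by the paginating generator above.
    for zone in driver.iterate_zones():
        print(zone.domain)

    # Look a zone up by domain, then add an A record to it.
    zone = driver.ex_get_zone_by_domain("example.com")
    record = driver.create_record(name="www", zone=zone, type=RecordType.A,
                                  data="192.0.2.10",
                                  extra={"ttl": 300, "notes": "added by sketch"})
    print(record.id)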
| ClusterHQ/libcloud | libcloud/dns/drivers/zerigo.py | Python | apache-2.0 | 18252 |
#!/usr/bin/env python
# Licensed to Cloudera, Inc. under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. Cloudera, Inc. licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from django.contrib.auth.models import User
from nose.plugins.attrib import attr
from nose.tools import assert_equal, assert_true, assert_not_equal
from hadoop import cluster, pseudo_hdfs4
from hadoop.conf import HDFS_CLUSTERS, MR_CLUSTERS, YARN_CLUSTERS
from desktop.lib.test_utils import clear_sys_caches
from desktop.lib.django_test_util import make_logged_in_client
from oozie.models2 import Node
from oozie.tests import OozieMockBase
from liboozie.conf import USE_LIBPATH_FOR_JARS
from liboozie.submission2 import Submission
LOG = logging.getLogger(__name__)
@attr('requires_hadoop')
def test_copy_files():
cluster = pseudo_hdfs4.shared_cluster()
try:
c = make_logged_in_client()
user = User.objects.get(username='test')
prefix = '/tmp/test_copy_files'
if cluster.fs.exists(prefix):
cluster.fs.rmtree(prefix)
# Jars in various locations
deployment_dir = '%s/workspace' % prefix
external_deployment_dir = '%s/deployment' % prefix
jar_1 = '%s/udf1.jar' % prefix
jar_2 = '%s/lib/udf2.jar' % prefix
jar_3 = '%s/udf3.jar' % deployment_dir
jar_4 = '%s/lib/udf4.jar' % deployment_dir # Doesn't move
jar_5 = 'udf5.jar'
jar_6 = 'lib/udf6.jar' # Doesn't move
cluster.fs.mkdir(prefix)
cluster.fs.create(jar_1)
cluster.fs.create(jar_2)
cluster.fs.create(jar_3)
cluster.fs.create(jar_4)
cluster.fs.create(deployment_dir + '/' + jar_5)
cluster.fs.create(deployment_dir + '/' + jar_6)
class MockJob():
XML_FILE_NAME = 'workflow.xml'
def __init__(self):
self.deployment_dir = deployment_dir
self.nodes = [
Node({'id': '1', 'type': 'mapreduce', 'properties': {'jar_path': jar_1}}),
Node({'id': '2', 'type': 'mapreduce', 'properties': {'jar_path': jar_2}}),
Node({'id': '3', 'type': 'java', 'properties': {'jar_path': jar_3}}),
Node({'id': '4', 'type': 'java', 'properties': {'jar_path': jar_4}}),
# Workspace relative paths
Node({'id': '5', 'type': 'java', 'properties': {'jar_path': jar_5}}),
Node({'id': '6', 'type': 'java', 'properties': {'jar_path': jar_6}})
]
submission = Submission(user, job=MockJob(), fs=cluster.fs, jt=cluster.jt)
submission._copy_files(deployment_dir, "<xml>My XML</xml>", {'prop1': 'val1'})
submission._copy_files(external_deployment_dir, "<xml>My XML</xml>", {'prop1': 'val1'})
assert_true(cluster.fs.exists(deployment_dir + '/workflow.xml'), deployment_dir)
assert_true(cluster.fs.exists(deployment_dir + '/job.properties'), deployment_dir)
# All sources still there
assert_true(cluster.fs.exists(jar_1))
assert_true(cluster.fs.exists(jar_2))
assert_true(cluster.fs.exists(jar_3))
assert_true(cluster.fs.exists(jar_4))
assert_true(cluster.fs.exists(deployment_dir + '/' + jar_5))
assert_true(cluster.fs.exists(deployment_dir + '/' + jar_6))
# Lib
deployment_dir = deployment_dir + '/lib'
external_deployment_dir = external_deployment_dir + '/lib'
if USE_LIBPATH_FOR_JARS.get():
assert_true(jar_1 in submission.properties['oozie.libpath'])
assert_true(jar_2 in submission.properties['oozie.libpath'])
assert_true(jar_3 in submission.properties['oozie.libpath'])
assert_true(jar_4 in submission.properties['oozie.libpath'])
print deployment_dir + '/' + jar_5
assert_true((deployment_dir + '/' + jar_5) in submission.properties['oozie.libpath'], submission.properties['oozie.libpath'])
assert_true((deployment_dir + '/' + jar_6) in submission.properties['oozie.libpath'], submission.properties['oozie.libpath'])
else:
list_dir_workspace = cluster.fs.listdir(deployment_dir)
list_dir_deployement = cluster.fs.listdir(external_deployment_dir)
# All destinations there
assert_true(cluster.fs.exists(deployment_dir + '/udf1.jar'), list_dir_workspace)
assert_true(cluster.fs.exists(deployment_dir + '/udf2.jar'), list_dir_workspace)
assert_true(cluster.fs.exists(deployment_dir + '/udf3.jar'), list_dir_workspace)
assert_true(cluster.fs.exists(deployment_dir + '/udf4.jar'), list_dir_workspace)
assert_true(cluster.fs.exists(deployment_dir + '/udf5.jar'), list_dir_workspace)
assert_true(cluster.fs.exists(deployment_dir + '/udf6.jar'), list_dir_workspace)
assert_true(cluster.fs.exists(external_deployment_dir + '/udf1.jar'), list_dir_deployement)
assert_true(cluster.fs.exists(external_deployment_dir + '/udf2.jar'), list_dir_deployement)
assert_true(cluster.fs.exists(external_deployment_dir + '/udf3.jar'), list_dir_deployement)
assert_true(cluster.fs.exists(external_deployment_dir + '/udf4.jar'), list_dir_deployement)
assert_true(cluster.fs.exists(external_deployment_dir + '/udf5.jar'), list_dir_deployement)
assert_true(cluster.fs.exists(external_deployment_dir + '/udf6.jar'), list_dir_deployement)
stats_udf1 = cluster.fs.stats(deployment_dir + '/udf1.jar')
stats_udf2 = cluster.fs.stats(deployment_dir + '/udf2.jar')
stats_udf3 = cluster.fs.stats(deployment_dir + '/udf3.jar')
stats_udf4 = cluster.fs.stats(deployment_dir + '/udf4.jar')
stats_udf5 = cluster.fs.stats(deployment_dir + '/udf5.jar')
stats_udf6 = cluster.fs.stats(deployment_dir + '/udf6.jar')
submission._copy_files('%s/workspace' % prefix, "<xml>My XML</xml>", {'prop1': 'val1'})
assert_not_equal(stats_udf1['fileId'], cluster.fs.stats(deployment_dir + '/udf1.jar')['fileId'])
assert_not_equal(stats_udf2['fileId'], cluster.fs.stats(deployment_dir + '/udf2.jar')['fileId'])
assert_not_equal(stats_udf3['fileId'], cluster.fs.stats(deployment_dir + '/udf3.jar')['fileId'])
assert_equal(stats_udf4['fileId'], cluster.fs.stats(deployment_dir + '/udf4.jar')['fileId'])
assert_not_equal(stats_udf5['fileId'], cluster.fs.stats(deployment_dir + '/udf5.jar')['fileId'])
assert_equal(stats_udf6['fileId'], cluster.fs.stats(deployment_dir + '/udf6.jar')['fileId'])
# Test _create_file()
submission._create_file(deployment_dir, 'test.txt', data='Test data')
assert_true(cluster.fs.exists(deployment_dir + '/test.txt'), list_dir_workspace)
finally:
try:
cluster.fs.rmtree(prefix)
except:
LOG.exception('failed to remove %s' % prefix)
class MockFs():
def __init__(self, logical_name=None):
self.fs_defaultfs = 'hdfs://curacao:8020'
self.logical_name = logical_name if logical_name else ''
class MockJt():
def __init__(self, logical_name=None):
self.logical_name = logical_name if logical_name else ''
class TestSubmission(OozieMockBase):
def test_get_properties(self):
submission = Submission(self.user, fs=MockFs())
assert_equal({'security_enabled': False}, submission.properties)
submission._update_properties('curacao:8032', '/deployment_dir')
assert_equal({
'jobTracker': 'curacao:8032',
'nameNode': 'hdfs://curacao:8020',
'security_enabled': False
}, submission.properties)
def test_get_logical_properties(self):
submission = Submission(self.user, fs=MockFs(logical_name='fsname'), jt=MockJt(logical_name='jtname'))
assert_equal({'security_enabled': False}, submission.properties)
submission._update_properties('curacao:8032', '/deployment_dir')
assert_equal({
'jobTracker': 'jtname',
'nameNode': 'fsname',
'security_enabled': False
}, submission.properties)
def test_update_properties(self):
finish = []
finish.append(MR_CLUSTERS.set_for_testing({'default': {}}))
finish.append(MR_CLUSTERS['default'].SUBMIT_TO.set_for_testing(True))
finish.append(YARN_CLUSTERS.set_for_testing({'default': {}}))
finish.append(YARN_CLUSTERS['default'].SUBMIT_TO.set_for_testing(True))
try:
properties = {
'user.name': 'hue',
'test.1': 'http://localhost/test?test1=test&test2=test',
'nameNode': 'hdfs://curacao:8020',
'jobTracker': 'jtaddress',
'security_enabled': False
}
final_properties = properties.copy()
submission = Submission(None, properties=properties, oozie_id='test', fs=MockFs())
assert_equal(properties, submission.properties)
submission._update_properties('jtaddress', 'deployment-directory')
assert_equal(final_properties, submission.properties)
clear_sys_caches()
fs = cluster.get_hdfs()
jt = cluster.get_next_ha_mrcluster()[1]
final_properties = properties.copy()
final_properties.update({
'jobTracker': 'jtaddress',
'nameNode': fs.fs_defaultfs
})
submission = Submission(None, properties=properties, oozie_id='test', fs=fs, jt=jt)
assert_equal(properties, submission.properties)
submission._update_properties('jtaddress', 'deployment-directory')
assert_equal(final_properties, submission.properties)
finish.append(HDFS_CLUSTERS['default'].LOGICAL_NAME.set_for_testing('namenode'))
finish.append(MR_CLUSTERS['default'].LOGICAL_NAME.set_for_testing('jobtracker'))
clear_sys_caches()
fs = cluster.get_hdfs()
jt = cluster.get_next_ha_mrcluster()[1]
final_properties = properties.copy()
final_properties.update({
'jobTracker': 'jobtracker',
'nameNode': 'namenode'
})
submission = Submission(None, properties=properties, oozie_id='test', fs=fs, jt=jt)
assert_equal(properties, submission.properties)
submission._update_properties('jtaddress', 'deployment-directory')
assert_equal(final_properties, submission.properties)
finally:
clear_sys_caches()
for reset in finish:
reset()
def test_get_external_parameters(self):
xml = """
<workflow-app name="Pig" xmlns="uri:oozie:workflow:0.4">
<start to="Pig"/>
<action name="Pig">
<pig>
<job-tracker>${jobTracker}</job-tracker>
<name-node>${nameNode}</name-node>
<prepare>
<delete path="${output}"/>
</prepare>
<script>aggregate.pig</script>
<argument>-param</argument>
<argument>INPUT=${input}</argument>
<argument>-param</argument>
<argument>OUTPUT=${output}</argument>
<configuration>
<property>
<name>mapred.input.format.class</name>
<value>org.apache.hadoop.examples.SleepJob$SleepInputFormat</value>
</property>
</configuration>
</pig>
<ok to="end"/>
<error to="kill"/>
</action>
<kill name="kill">
<message>Action failed, error message[${wf:errorMessage(wf:lastErrorNode())}]</message>
</kill>
<end name="end"/>
</workflow-app>
"""
properties = """
#
# Licensed to the Hue
#
nameNode=hdfs://localhost:8020
jobTracker=localhost:8021
queueName=default
examplesRoot=examples
oozie.use.system.libpath=true
oozie.wf.application.path=${nameNode}/user/${user.name}/${examplesRoot}/apps/pig
"""
parameters = Submission(self.user)._get_external_parameters(xml, properties)
assert_equal({'oozie.use.system.libpath': 'true',
'input': '',
'jobTracker': 'localhost:8021',
'oozie.wf.application.path': '${nameNode}/user/${user.name}/${examplesRoot}/apps/pig',
'examplesRoot': 'examples',
'output': '',
'nameNode': 'hdfs://localhost:8020',
'queueName': 'default'
},
parameters)
| fangxingli/hue | desktop/libs/liboozie/src/liboozie/submittion2_tests.py | Python | apache-2.0 | 12471 |
from sys import argv
script, filename = argv
print "We're going to erase %r." % filename
print "If you don't want that, hit CTRL-C (^C)."
print "If you do want that, hit RETURN."
raw_input("?")
print "Opening the file..."
target = open(filename,'w')
print "Truncating the file. Goodbye!"
target.truncate()
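# Note (editor): open(filename, 'w') above already truncated the file, so the
# explicit truncate() call is redundant; it simply restates what mode 'w' did.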
print "Now I'm going to ask you for three lines."
line1 = raw_input("line 1:")
line2 = raw_input("line 2:")
line3 = raw_input("line 3:")
print "I'm going to write these to the file."
content = "%s\n%s\n%s\n" % (line1, line2, line3)
target.write(content)
print "And finally, we close it."
target.close()
| CodeSheng/LPLHW | ex16-3.py | Python | apache-2.0 | 619 |
# -*- coding: utf-8 -*-
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import proto # type: ignore
from google.cloud.aiplatform_v1.types import entity_type as gca_entity_type
from google.cloud.aiplatform_v1.types import feature as gca_feature
from google.cloud.aiplatform_v1.types import feature_selector as gca_feature_selector
from google.cloud.aiplatform_v1.types import featurestore as gca_featurestore
from google.cloud.aiplatform_v1.types import io
from google.cloud.aiplatform_v1.types import operation
from google.protobuf import field_mask_pb2 # type: ignore
from google.protobuf import timestamp_pb2 # type: ignore
__protobuf__ = proto.module(
package="google.cloud.aiplatform.v1",
manifest={
"CreateFeaturestoreRequest",
"GetFeaturestoreRequest",
"ListFeaturestoresRequest",
"ListFeaturestoresResponse",
"UpdateFeaturestoreRequest",
"DeleteFeaturestoreRequest",
"ImportFeatureValuesRequest",
"ImportFeatureValuesResponse",
"BatchReadFeatureValuesRequest",
"ExportFeatureValuesRequest",
"DestinationFeatureSetting",
"FeatureValueDestination",
"ExportFeatureValuesResponse",
"BatchReadFeatureValuesResponse",
"CreateEntityTypeRequest",
"GetEntityTypeRequest",
"ListEntityTypesRequest",
"ListEntityTypesResponse",
"UpdateEntityTypeRequest",
"DeleteEntityTypeRequest",
"CreateFeatureRequest",
"BatchCreateFeaturesRequest",
"BatchCreateFeaturesResponse",
"GetFeatureRequest",
"ListFeaturesRequest",
"ListFeaturesResponse",
"SearchFeaturesRequest",
"SearchFeaturesResponse",
"UpdateFeatureRequest",
"DeleteFeatureRequest",
"CreateFeaturestoreOperationMetadata",
"UpdateFeaturestoreOperationMetadata",
"ImportFeatureValuesOperationMetadata",
"ExportFeatureValuesOperationMetadata",
"BatchReadFeatureValuesOperationMetadata",
"CreateEntityTypeOperationMetadata",
"CreateFeatureOperationMetadata",
"BatchCreateFeaturesOperationMetadata",
},
)
class CreateFeaturestoreRequest(proto.Message):
r"""Request message for
[FeaturestoreService.CreateFeaturestore][google.cloud.aiplatform.v1.FeaturestoreService.CreateFeaturestore].
Attributes:
parent (str):
Required. The resource name of the Location to create
Featurestores. Format:
``projects/{project}/locations/{location}'``
featurestore (google.cloud.aiplatform_v1.types.Featurestore):
Required. The Featurestore to create.
featurestore_id (str):
Required. The ID to use for this Featurestore, which will
become the final component of the Featurestore's resource
name.
This value may be up to 60 characters, and valid characters
are ``[a-z0-9_]``. The first character cannot be a number.
The value must be unique within the project and location.
"""
parent = proto.Field(proto.STRING, number=1,)
featurestore = proto.Field(
proto.MESSAGE, number=2, message=gca_featurestore.Featurestore,
)
featurestore_id = proto.Field(proto.STRING, number=3,)
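# --- Editor's illustrative note (hedged; not part of the generated module) ---
# proto-plus messages are constructed with keyword arguments matching the
# fields documented above. A CreateFeaturestoreRequest might be built like
# this (the project/location path and IDs are placeholders):
#
#     request = CreateFeaturestoreRequest(
#         parent="projects/my-project/locations/us-central1",
#         featurestore=gca_featurestore.Featurestore(
#             online_serving_config=gca_featurestore.Featurestore.OnlineServingConfig(
#                 fixed_node_count=1,
#             ),
#         ),
#         featurestore_id="my_featurestore",
#     )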
class GetFeaturestoreRequest(proto.Message):
r"""Request message for
[FeaturestoreService.GetFeaturestore][google.cloud.aiplatform.v1.FeaturestoreService.GetFeaturestore].
Attributes:
name (str):
Required. The name of the Featurestore
resource.
"""
name = proto.Field(proto.STRING, number=1,)
class ListFeaturestoresRequest(proto.Message):
r"""Request message for
[FeaturestoreService.ListFeaturestores][google.cloud.aiplatform.v1.FeaturestoreService.ListFeaturestores].
Attributes:
parent (str):
Required. The resource name of the Location to list
Featurestores. Format:
``projects/{project}/locations/{location}``
filter (str):
Lists the featurestores that match the filter expression.
The following fields are supported:
- ``create_time``: Supports ``=``, ``!=``, ``<``, ``>``,
``<=``, and ``>=`` comparisons. Values must be in RFC
3339 format.
- ``update_time``: Supports ``=``, ``!=``, ``<``, ``>``,
``<=``, and ``>=`` comparisons. Values must be in RFC
3339 format.
- ``online_serving_config.fixed_node_count``: Supports
``=``, ``!=``, ``<``, ``>``, ``<=``, and ``>=``
comparisons.
- ``labels``: Supports key-value equality and key presence.
Examples:
- ``create_time > "2020-01-01" OR update_time > "2020-01-01"``
Featurestores created or updated after 2020-01-01.
- ``labels.env = "prod"`` Featurestores with label "env"
set to "prod".
page_size (int):
The maximum number of Featurestores to
return. The service may return fewer than this
value. If unspecified, at most 100 Featurestores
will be returned. The maximum value is 100; any
value greater than 100 will be coerced to 100.
page_token (str):
A page token, received from a previous
[FeaturestoreService.ListFeaturestores][google.cloud.aiplatform.v1.FeaturestoreService.ListFeaturestores]
call. Provide this to retrieve the subsequent page.
When paginating, all other parameters provided to
[FeaturestoreService.ListFeaturestores][google.cloud.aiplatform.v1.FeaturestoreService.ListFeaturestores]
must match the call that provided the page token.
order_by (str):
A comma-separated list of fields to order by, sorted in
ascending order. Use "desc" after a field name for
descending. Supported Fields:
- ``create_time``
- ``update_time``
- ``online_serving_config.fixed_node_count``
read_mask (google.protobuf.field_mask_pb2.FieldMask):
Mask specifying which fields to read.
"""
parent = proto.Field(proto.STRING, number=1,)
filter = proto.Field(proto.STRING, number=2,)
page_size = proto.Field(proto.INT32, number=3,)
page_token = proto.Field(proto.STRING, number=4,)
order_by = proto.Field(proto.STRING, number=5,)
read_mask = proto.Field(proto.MESSAGE, number=6, message=field_mask_pb2.FieldMask,)
class ListFeaturestoresResponse(proto.Message):
r"""Response message for
[FeaturestoreService.ListFeaturestores][google.cloud.aiplatform.v1.FeaturestoreService.ListFeaturestores].
Attributes:
featurestores (Sequence[google.cloud.aiplatform_v1.types.Featurestore]):
The Featurestores matching the request.
next_page_token (str):
A token, which can be sent as
[ListFeaturestoresRequest.page_token][google.cloud.aiplatform.v1.ListFeaturestoresRequest.page_token]
to retrieve the next page. If this field is omitted, there
are no subsequent pages.
"""
@property
def raw_page(self):
return self
featurestores = proto.RepeatedField(
proto.MESSAGE, number=1, message=gca_featurestore.Featurestore,
)
next_page_token = proto.Field(proto.STRING, number=2,)
class UpdateFeaturestoreRequest(proto.Message):
r"""Request message for
[FeaturestoreService.UpdateFeaturestore][google.cloud.aiplatform.v1.FeaturestoreService.UpdateFeaturestore].
Attributes:
featurestore (google.cloud.aiplatform_v1.types.Featurestore):
Required. The Featurestore's ``name`` field is used to
identify the Featurestore to be updated. Format:
``projects/{project}/locations/{location}/featurestores/{featurestore}``
update_mask (google.protobuf.field_mask_pb2.FieldMask):
Field mask is used to specify the fields to be overwritten
in the Featurestore resource by the update. The fields
specified in the update_mask are relative to the resource,
not the full request. A field will be overwritten if it is
in the mask. If the user does not provide a mask then only
the non-empty fields present in the request will be
overwritten. Set the update_mask to ``*`` to override all
fields.
Updatable fields:
- ``labels``
- ``online_serving_config.fixed_node_count``
"""
featurestore = proto.Field(
proto.MESSAGE, number=1, message=gca_featurestore.Featurestore,
)
update_mask = proto.Field(
proto.MESSAGE, number=2, message=field_mask_pb2.FieldMask,
)
class DeleteFeaturestoreRequest(proto.Message):
r"""Request message for
[FeaturestoreService.DeleteFeaturestore][google.cloud.aiplatform.v1.FeaturestoreService.DeleteFeaturestore].
Attributes:
name (str):
Required. The name of the Featurestore to be deleted.
Format:
``projects/{project}/locations/{location}/featurestores/{featurestore}``
force (bool):
If set to true, any EntityTypes and Features
for this Featurestore will also be deleted.
(Otherwise, the request will only work if the
Featurestore has no EntityTypes.)
"""
name = proto.Field(proto.STRING, number=1,)
force = proto.Field(proto.BOOL, number=2,)
class ImportFeatureValuesRequest(proto.Message):
r"""Request message for
[FeaturestoreService.ImportFeatureValues][google.cloud.aiplatform.v1.FeaturestoreService.ImportFeatureValues].
This message has `oneof`_ fields (mutually exclusive fields).
For each oneof, at most one member field can be set at the same time.
Setting any member of the oneof automatically clears all other
members.
.. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields
Attributes:
avro_source (google.cloud.aiplatform_v1.types.AvroSource):
This field is a member of `oneof`_ ``source``.
bigquery_source (google.cloud.aiplatform_v1.types.BigQuerySource):
This field is a member of `oneof`_ ``source``.
csv_source (google.cloud.aiplatform_v1.types.CsvSource):
This field is a member of `oneof`_ ``source``.
feature_time_field (str):
Source column that holds the Feature
timestamp for all Feature values in each entity.
This field is a member of `oneof`_ ``feature_time_source``.
feature_time (google.protobuf.timestamp_pb2.Timestamp):
Single Feature timestamp for all entities
being imported. The timestamp must not have
higher than millisecond precision.
This field is a member of `oneof`_ ``feature_time_source``.
entity_type (str):
Required. The resource name of the EntityType grouping the
Features for which values are being imported. Format:
``projects/{project}/locations/{location}/featurestores/{featurestore}/entityTypes/{entityType}``
entity_id_field (str):
Source column that holds entity IDs. If not provided, entity
IDs are extracted from the column named ``entity_id``.
feature_specs (Sequence[google.cloud.aiplatform_v1.types.ImportFeatureValuesRequest.FeatureSpec]):
Required. Specifications defining which Feature values to
import from the entity. The request fails if no
feature_specs are provided, and having multiple
feature_specs for one Feature is not allowed.
disable_online_serving (bool):
If set, data will not be imported for online
serving. This is typically used for backfilling,
where Feature generation timestamps are not in
the timestamp range needed for online serving.
worker_count (int):
Specifies the number of workers that are used
to write data to the Featurestore. Consider the
online serving capacity that you require to
achieve the desired import throughput without
interfering with online serving. The value must
be positive, and less than or equal to 100. If
not set, defaults to using 1 worker. The low
count ensures minimal impact on online serving
performance.
"""
class FeatureSpec(proto.Message):
r"""Defines the Feature value(s) to import.
Attributes:
id (str):
Required. ID of the Feature to import values
of. This Feature must exist in the target
EntityType, or the request will fail.
source_field (str):
Source column to get the Feature values from.
If not set, uses the column with the same name
as the Feature ID.
"""
id = proto.Field(proto.STRING, number=1,)
source_field = proto.Field(proto.STRING, number=2,)
avro_source = proto.Field(
proto.MESSAGE, number=2, oneof="source", message=io.AvroSource,
)
bigquery_source = proto.Field(
proto.MESSAGE, number=3, oneof="source", message=io.BigQuerySource,
)
csv_source = proto.Field(
proto.MESSAGE, number=4, oneof="source", message=io.CsvSource,
)
feature_time_field = proto.Field(
proto.STRING, number=6, oneof="feature_time_source",
)
feature_time = proto.Field(
proto.MESSAGE,
number=7,
oneof="feature_time_source",
message=timestamp_pb2.Timestamp,
)
entity_type = proto.Field(proto.STRING, number=1,)
entity_id_field = proto.Field(proto.STRING, number=5,)
feature_specs = proto.RepeatedField(proto.MESSAGE, number=8, message=FeatureSpec,)
disable_online_serving = proto.Field(proto.BOOL, number=9,)
worker_count = proto.Field(proto.INT32, number=11,)
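# --- Editor's illustrative note (hedged) ---
# Example construction of the request above; the oneof members (avro_source /
# bigquery_source / csv_source, and feature_time_field / feature_time) are
# mutually exclusive, so exactly one source and one time source is set. All
# resource names and URIs below are placeholders:
#
#     request = ImportFeatureValuesRequest(
#         entity_type=("projects/my-project/locations/us-central1/"
#                      "featurestores/my_featurestore/entityTypes/users"),
#         csv_source=io.CsvSource(
#             gcs_source=io.GcsSource(uris=["gs://my-bucket/feature_values.csv"]),
#         ),
#         feature_time_field="update_time",
#         entity_id_field="user_id",
#         feature_specs=[ImportFeatureValuesRequest.FeatureSpec(id="age")],
#         worker_count=1,
#     )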
class ImportFeatureValuesResponse(proto.Message):
r"""Response message for
[FeaturestoreService.ImportFeatureValues][google.cloud.aiplatform.v1.FeaturestoreService.ImportFeatureValues].
Attributes:
imported_entity_count (int):
Number of entities that have been imported by
the operation.
imported_feature_value_count (int):
Number of Feature values that have been
imported by the operation.
invalid_row_count (int):
The number of rows in input source that weren't imported due
to either
- Not having any featureValues.
- Having a null entityId.
- Having a null timestamp.
- Not being parsable (applicable for CSV sources).
"""
imported_entity_count = proto.Field(proto.INT64, number=1,)
imported_feature_value_count = proto.Field(proto.INT64, number=2,)
invalid_row_count = proto.Field(proto.INT64, number=6,)
class BatchReadFeatureValuesRequest(proto.Message):
r"""Request message for
[FeaturestoreService.BatchReadFeatureValues][google.cloud.aiplatform.v1.FeaturestoreService.BatchReadFeatureValues].
This message has `oneof`_ fields (mutually exclusive fields).
For each oneof, at most one member field can be set at the same time.
Setting any member of the oneof automatically clears all other
members.
.. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields
Attributes:
csv_read_instances (google.cloud.aiplatform_v1.types.CsvSource):
Each read instance consists of exactly one read timestamp
and one or more entity IDs identifying entities of the
corresponding EntityTypes whose Features are requested.
Each output instance contains Feature values of requested
entities concatenated together as of the read time.
An example read instance may be
``foo_entity_id, bar_entity_id, 2020-01-01T10:00:00.123Z``.
An example output instance may be
``foo_entity_id, bar_entity_id, 2020-01-01T10:00:00.123Z, foo_entity_feature1_value, bar_entity_feature2_value``.
Timestamp in each read instance must be millisecond-aligned.
``csv_read_instances`` are read instances stored in a
plain-text CSV file. The header should be:
[ENTITY_TYPE_ID1], [ENTITY_TYPE_ID2], ..., timestamp
The columns can be in any order.
Values in the timestamp column must use the RFC 3339 format,
e.g. ``2012-07-30T10:43:17.123Z``.
This field is a member of `oneof`_ ``read_option``.
bigquery_read_instances (google.cloud.aiplatform_v1.types.BigQuerySource):
Similar to csv_read_instances, but from BigQuery source.
This field is a member of `oneof`_ ``read_option``.
featurestore (str):
Required. The resource name of the Featurestore from which
to query Feature values. Format:
``projects/{project}/locations/{location}/featurestores/{featurestore}``
destination (google.cloud.aiplatform_v1.types.FeatureValueDestination):
Required. Specifies output location and
format.
pass_through_fields (Sequence[google.cloud.aiplatform_v1.types.BatchReadFeatureValuesRequest.PassThroughField]):
When not empty, the specified fields in the
\*_read_instances source will be joined as-is in the output,
in addition to those fields from the Featurestore Entity.
For BigQuery source, the type of the pass-through values
will be automatically inferred. For CSV source, the
pass-through values will be passed as opaque bytes.
entity_type_specs (Sequence[google.cloud.aiplatform_v1.types.BatchReadFeatureValuesRequest.EntityTypeSpec]):
Required. Specifies EntityType grouping Features to read
values of and settings. Each EntityType referenced in
[BatchReadFeatureValuesRequest.entity_type_specs] must have
a column specifying entity IDs in the EntityType in
[BatchReadFeatureValuesRequest.request][] .
"""
class PassThroughField(proto.Message):
r"""Describe pass-through fields in read_instance source.
Attributes:
field_name (str):
Required. The name of the field in the CSV header or the
name of the column in BigQuery table. The naming restriction
is the same as
[Feature.name][google.cloud.aiplatform.v1.Feature.name].
"""
field_name = proto.Field(proto.STRING, number=1,)
class EntityTypeSpec(proto.Message):
r"""Selects Features of an EntityType to read values of and
specifies read settings.
Attributes:
entity_type_id (str):
Required. ID of the EntityType to select Features. The
EntityType id is the
[entity_type_id][google.cloud.aiplatform.v1.CreateEntityTypeRequest.entity_type_id]
specified during EntityType creation.
feature_selector (google.cloud.aiplatform_v1.types.FeatureSelector):
Required. Selectors choosing which Feature
values to read from the EntityType.
settings (Sequence[google.cloud.aiplatform_v1.types.DestinationFeatureSetting]):
Per-Feature settings for the batch read.
"""
entity_type_id = proto.Field(proto.STRING, number=1,)
feature_selector = proto.Field(
proto.MESSAGE, number=2, message=gca_feature_selector.FeatureSelector,
)
settings = proto.RepeatedField(
proto.MESSAGE, number=3, message="DestinationFeatureSetting",
)
csv_read_instances = proto.Field(
proto.MESSAGE, number=3, oneof="read_option", message=io.CsvSource,
)
bigquery_read_instances = proto.Field(
proto.MESSAGE, number=5, oneof="read_option", message=io.BigQuerySource,
)
featurestore = proto.Field(proto.STRING, number=1,)
destination = proto.Field(
proto.MESSAGE, number=4, message="FeatureValueDestination",
)
pass_through_fields = proto.RepeatedField(
proto.MESSAGE, number=8, message=PassThroughField,
)
entity_type_specs = proto.RepeatedField(
proto.MESSAGE, number=7, message=EntityTypeSpec,
)
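# --- Editor's illustrative note (hedged) ---
# A batch read joins the read-instances source with the selected Feature
# values per EntityType. A minimal request, with placeholder names throughout,
# might look like:
#
#     request = BatchReadFeatureValuesRequest(
#         featurestore=("projects/my-project/locations/us-central1/"
#                       "featurestores/my_featurestore"),
#         csv_read_instances=io.CsvSource(
#             gcs_source=io.GcsSource(uris=["gs://my-bucket/read_instances.csv"]),
#         ),
#         destination=FeatureValueDestination(
#             bigquery_destination=io.BigQueryDestination(
#                 output_uri="bq://my-project.my_dataset.my_table",
#             ),
#         ),
#         entity_type_specs=[
#             BatchReadFeatureValuesRequest.EntityTypeSpec(
#                 entity_type_id="users",
#                 feature_selector=gca_feature_selector.FeatureSelector(
#                     id_matcher=gca_feature_selector.IdMatcher(ids=["age", "country"]),
#                 ),
#             ),
#         ],
#     )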
class ExportFeatureValuesRequest(proto.Message):
r"""Request message for
[FeaturestoreService.ExportFeatureValues][google.cloud.aiplatform.v1.FeaturestoreService.ExportFeatureValues].
This message has `oneof`_ fields (mutually exclusive fields).
For each oneof, at most one member field can be set at the same time.
Setting any member of the oneof automatically clears all other
members.
.. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields
Attributes:
snapshot_export (google.cloud.aiplatform_v1.types.ExportFeatureValuesRequest.SnapshotExport):
Exports the latest Feature values of all
entities of the EntityType within a time range.
This field is a member of `oneof`_ ``mode``.
full_export (google.cloud.aiplatform_v1.types.ExportFeatureValuesRequest.FullExport):
Exports all historical values of all entities
of the EntityType within a time range
This field is a member of `oneof`_ ``mode``.
entity_type (str):
Required. The resource name of the EntityType from which to
export Feature values. Format:
``projects/{project}/locations/{location}/featurestores/{featurestore}/entityTypes/{entity_type}``
destination (google.cloud.aiplatform_v1.types.FeatureValueDestination):
Required. Specifies destination location and
format.
feature_selector (google.cloud.aiplatform_v1.types.FeatureSelector):
Required. Selects Features to export values
of.
settings (Sequence[google.cloud.aiplatform_v1.types.DestinationFeatureSetting]):
Per-Feature export settings.
"""
class SnapshotExport(proto.Message):
r"""Describes exporting the latest Feature values of all entities of the
EntityType between [start_time, snapshot_time].
Attributes:
snapshot_time (google.protobuf.timestamp_pb2.Timestamp):
Exports Feature values as of this timestamp.
If not set, retrieve values as of now.
Timestamp, if present, must not have higher than
millisecond precision.
start_time (google.protobuf.timestamp_pb2.Timestamp):
Excludes Feature values with feature
generation timestamp before this timestamp. If
not set, retrieve oldest values kept in Feature
Store. Timestamp, if present, must not have
higher than millisecond precision.
"""
snapshot_time = proto.Field(
proto.MESSAGE, number=1, message=timestamp_pb2.Timestamp,
)
start_time = proto.Field(
proto.MESSAGE, number=2, message=timestamp_pb2.Timestamp,
)
class FullExport(proto.Message):
r"""Describes exporting all historical Feature values of all entities of
the EntityType between [start_time, end_time].
Attributes:
start_time (google.protobuf.timestamp_pb2.Timestamp):
Excludes Feature values with feature
generation timestamp before this timestamp. If
not set, retrieve oldest values kept in Feature
Store. Timestamp, if present, must not have
higher than millisecond precision.
end_time (google.protobuf.timestamp_pb2.Timestamp):
Exports Feature values as of this timestamp.
If not set, retrieve values as of now.
Timestamp, if present, must not have higher than
millisecond precision.
"""
start_time = proto.Field(
proto.MESSAGE, number=2, message=timestamp_pb2.Timestamp,
)
end_time = proto.Field(
proto.MESSAGE, number=1, message=timestamp_pb2.Timestamp,
)
snapshot_export = proto.Field(
proto.MESSAGE, number=3, oneof="mode", message=SnapshotExport,
)
full_export = proto.Field(
proto.MESSAGE, number=7, oneof="mode", message=FullExport,
)
entity_type = proto.Field(proto.STRING, number=1,)
destination = proto.Field(
proto.MESSAGE, number=4, message="FeatureValueDestination",
)
feature_selector = proto.Field(
proto.MESSAGE, number=5, message=gca_feature_selector.FeatureSelector,
)
settings = proto.RepeatedField(
proto.MESSAGE, number=6, message="DestinationFeatureSetting",
)
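# --- Editor's illustrative note (hedged) ---
# The export mode is a oneof: either snapshot_export (latest values as of
# snapshot_time) or full_export (all historical values between start_time and
# end_time). A snapshot export with placeholder names might be built as:
#
#     request = ExportFeatureValuesRequest(
#         entity_type=("projects/my-project/locations/us-central1/"
#                      "featurestores/my_featurestore/entityTypes/users"),
#         snapshot_export=ExportFeatureValuesRequest.SnapshotExport(
#             snapshot_time=timestamp_pb2.Timestamp(seconds=1640995200),
#         ),
#         destination=FeatureValueDestination(
#             bigquery_destination=io.BigQueryDestination(
#                 output_uri="bq://my-project.my_dataset.my_export_table",
#             ),
#         ),
#         feature_selector=gca_feature_selector.FeatureSelector(
#             id_matcher=gca_feature_selector.IdMatcher(ids=["age"]),
#         ),
#     )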
class DestinationFeatureSetting(proto.Message):
r"""
Attributes:
feature_id (str):
Required. The ID of the Feature to apply the
setting to.
destination_field (str):
Specify the field name in the export
destination. If not specified, Feature ID is
used.
"""
feature_id = proto.Field(proto.STRING, number=1,)
destination_field = proto.Field(proto.STRING, number=2,)
class FeatureValueDestination(proto.Message):
r"""A destination location for Feature values and format.
This message has `oneof`_ fields (mutually exclusive fields).
For each oneof, at most one member field can be set at the same time.
Setting any member of the oneof automatically clears all other
members.
.. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields
Attributes:
bigquery_destination (google.cloud.aiplatform_v1.types.BigQueryDestination):
Output in BigQuery format.
[BigQueryDestination.output_uri][google.cloud.aiplatform.v1.BigQueryDestination.output_uri]
in
[FeatureValueDestination.bigquery_destination][google.cloud.aiplatform.v1.FeatureValueDestination.bigquery_destination]
must refer to a table.
This field is a member of `oneof`_ ``destination``.
tfrecord_destination (google.cloud.aiplatform_v1.types.TFRecordDestination):
Output in TFRecord format.
Below are the mapping from Feature value type in
Featurestore to Feature value type in TFRecord:
::
Value type in Featurestore | Value type in TFRecord
DOUBLE, DOUBLE_ARRAY | FLOAT_LIST
INT64, INT64_ARRAY | INT64_LIST
STRING, STRING_ARRAY, BYTES | BYTES_LIST
true -> byte_string("true"), false -> byte_string("false")
BOOL, BOOL_ARRAY (true, false) | BYTES_LIST
This field is a member of `oneof`_ ``destination``.
csv_destination (google.cloud.aiplatform_v1.types.CsvDestination):
Output in CSV format. Array Feature value
types are not allowed in CSV format.
This field is a member of `oneof`_ ``destination``.
"""
bigquery_destination = proto.Field(
proto.MESSAGE, number=1, oneof="destination", message=io.BigQueryDestination,
)
tfrecord_destination = proto.Field(
proto.MESSAGE, number=2, oneof="destination", message=io.TFRecordDestination,
)
csv_destination = proto.Field(
proto.MESSAGE, number=3, oneof="destination", message=io.CsvDestination,
)
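# Illustrative sketch only (not part of the generated module): one plausible way
# to assemble an export destination and a per-Feature output-field override from
# the message classes defined above. The helper name, BigQuery URI and feature
# IDs are assumptions made purely for documentation.
def _example_export_destination():
    destination = FeatureValueDestination(
        bigquery_destination=io.BigQueryDestination(
            output_uri="bq://example-project.example_dataset.example_table",
        ),
    )
    setting = DestinationFeatureSetting(
        feature_id="average_rating",
        destination_field="avg_rating",
    )
    return destination, setting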
class ExportFeatureValuesResponse(proto.Message):
r"""Response message for
[FeaturestoreService.ExportFeatureValues][google.cloud.aiplatform.v1.FeaturestoreService.ExportFeatureValues].
"""
class BatchReadFeatureValuesResponse(proto.Message):
r"""Response message for
[FeaturestoreService.BatchReadFeatureValues][google.cloud.aiplatform.v1.FeaturestoreService.BatchReadFeatureValues].
"""
class CreateEntityTypeRequest(proto.Message):
r"""Request message for
[FeaturestoreService.CreateEntityType][google.cloud.aiplatform.v1.FeaturestoreService.CreateEntityType].
Attributes:
parent (str):
Required. The resource name of the Featurestore to create
EntityTypes. Format:
``projects/{project}/locations/{location}/featurestores/{featurestore}``
entity_type (google.cloud.aiplatform_v1.types.EntityType):
The EntityType to create.
entity_type_id (str):
Required. The ID to use for the EntityType, which will
become the final component of the EntityType's resource
name.
This value may be up to 60 characters, and valid characters
are ``[a-z0-9_]``. The first character cannot be a number.
The value must be unique within a featurestore.
"""
parent = proto.Field(proto.STRING, number=1,)
entity_type = proto.Field(
proto.MESSAGE, number=2, message=gca_entity_type.EntityType,
)
entity_type_id = proto.Field(proto.STRING, number=3,)
class GetEntityTypeRequest(proto.Message):
r"""Request message for
[FeaturestoreService.GetEntityType][google.cloud.aiplatform.v1.FeaturestoreService.GetEntityType].
Attributes:
name (str):
Required. The name of the EntityType resource. Format:
``projects/{project}/locations/{location}/featurestores/{featurestore}/entityTypes/{entity_type}``
"""
name = proto.Field(proto.STRING, number=1,)
class ListEntityTypesRequest(proto.Message):
r"""Request message for
[FeaturestoreService.ListEntityTypes][google.cloud.aiplatform.v1.FeaturestoreService.ListEntityTypes].
Attributes:
parent (str):
Required. The resource name of the Featurestore to list
EntityTypes. Format:
``projects/{project}/locations/{location}/featurestores/{featurestore}``
filter (str):
Lists the EntityTypes that match the filter expression. The
following filters are supported:
- ``create_time``: Supports ``=``, ``!=``, ``<``, ``>``,
``>=``, and ``<=`` comparisons. Values must be in RFC
3339 format.
- ``update_time``: Supports ``=``, ``!=``, ``<``, ``>``,
``>=``, and ``<=`` comparisons. Values must be in RFC
3339 format.
- ``labels``: Supports key-value equality as well as key
presence.
Examples:
- ``create_time > \"2020-01-31T15:30:00.000000Z\" OR update_time > \"2020-01-31T15:30:00.000000Z\"``
--> EntityTypes created or updated after
2020-01-31T15:30:00.000000Z.
- ``labels.active = yes AND labels.env = prod`` -->
EntityTypes having both (active: yes) and (env: prod)
labels.
- ``labels.env: *`` --> Any EntityType which has a label
with 'env' as the key.
page_size (int):
The maximum number of EntityTypes to return.
The service may return fewer than this value. If
unspecified, at most 1000 EntityTypes will be
returned. The maximum value is 1000; any value
greater than 1000 will be coerced to 1000.
page_token (str):
A page token, received from a previous
[FeaturestoreService.ListEntityTypes][google.cloud.aiplatform.v1.FeaturestoreService.ListEntityTypes]
call. Provide this to retrieve the subsequent page.
When paginating, all other parameters provided to
[FeaturestoreService.ListEntityTypes][google.cloud.aiplatform.v1.FeaturestoreService.ListEntityTypes]
must match the call that provided the page token.
order_by (str):
A comma-separated list of fields to order by, sorted in
ascending order. Use "desc" after a field name for
descending.
Supported fields:
- ``entity_type_id``
- ``create_time``
- ``update_time``
read_mask (google.protobuf.field_mask_pb2.FieldMask):
Mask specifying which fields to read.
"""
parent = proto.Field(proto.STRING, number=1,)
filter = proto.Field(proto.STRING, number=2,)
page_size = proto.Field(proto.INT32, number=3,)
page_token = proto.Field(proto.STRING, number=4,)
order_by = proto.Field(proto.STRING, number=5,)
read_mask = proto.Field(proto.MESSAGE, number=6, message=field_mask_pb2.FieldMask,)
class ListEntityTypesResponse(proto.Message):
r"""Response message for
[FeaturestoreService.ListEntityTypes][google.cloud.aiplatform.v1.FeaturestoreService.ListEntityTypes].
Attributes:
entity_types (Sequence[google.cloud.aiplatform_v1.types.EntityType]):
The EntityTypes matching the request.
next_page_token (str):
A token, which can be sent as
[ListEntityTypesRequest.page_token][google.cloud.aiplatform.v1.ListEntityTypesRequest.page_token]
to retrieve the next page. If this field is omitted, there
are no subsequent pages.
"""
@property
def raw_page(self):
return self
entity_types = proto.RepeatedField(
proto.MESSAGE, number=1, message=gca_entity_type.EntityType,
)
next_page_token = proto.Field(proto.STRING, number=2,)
class UpdateEntityTypeRequest(proto.Message):
r"""Request message for
[FeaturestoreService.UpdateEntityType][google.cloud.aiplatform.v1.FeaturestoreService.UpdateEntityType].
Attributes:
entity_type (google.cloud.aiplatform_v1.types.EntityType):
Required. The EntityType's ``name`` field is used to
identify the EntityType to be updated. Format:
``projects/{project}/locations/{location}/featurestores/{featurestore}/entityTypes/{entity_type}``
update_mask (google.protobuf.field_mask_pb2.FieldMask):
Field mask is used to specify the fields to be overwritten
in the EntityType resource by the update. The fields
specified in the update_mask are relative to the resource,
not the full request. A field will be overwritten if it is
in the mask. If the user does not provide a mask then only
the non-empty fields present in the request will be
overwritten. Set the update_mask to ``*`` to override all
fields.
Updatable fields:
- ``description``
- ``labels``
- ``monitoring_config.snapshot_analysis.disabled``
- ``monitoring_config.snapshot_analysis.monitoring_interval``
"""
entity_type = proto.Field(
proto.MESSAGE, number=1, message=gca_entity_type.EntityType,
)
update_mask = proto.Field(
proto.MESSAGE, number=2, message=field_mask_pb2.FieldMask,
)
class DeleteEntityTypeRequest(proto.Message):
r"""Request message for [FeaturestoreService.DeleteEntityTypes][].
Attributes:
name (str):
Required. The name of the EntityType to be deleted. Format:
``projects/{project}/locations/{location}/featurestores/{featurestore}/entityTypes/{entity_type}``
force (bool):
If set to true, any Features for this
EntityType will also be deleted. (Otherwise, the
request will only work if the EntityType has no
Features.)
"""
name = proto.Field(proto.STRING, number=1,)
force = proto.Field(proto.BOOL, number=2,)
class CreateFeatureRequest(proto.Message):
r"""Request message for
[FeaturestoreService.CreateFeature][google.cloud.aiplatform.v1.FeaturestoreService.CreateFeature].
Attributes:
parent (str):
Required. The resource name of the EntityType to create a
Feature. Format:
``projects/{project}/locations/{location}/featurestores/{featurestore}/entityTypes/{entity_type}``
feature (google.cloud.aiplatform_v1.types.Feature):
Required. The Feature to create.
feature_id (str):
Required. The ID to use for the Feature, which will become
the final component of the Feature's resource name.
This value may be up to 60 characters, and valid characters
are ``[a-z0-9_]``. The first character cannot be a number.
The value must be unique within an EntityType.
"""
parent = proto.Field(proto.STRING, number=1,)
feature = proto.Field(proto.MESSAGE, number=2, message=gca_feature.Feature,)
feature_id = proto.Field(proto.STRING, number=3,)
class BatchCreateFeaturesRequest(proto.Message):
r"""Request message for
[FeaturestoreService.BatchCreateFeatures][google.cloud.aiplatform.v1.FeaturestoreService.BatchCreateFeatures].
Attributes:
parent (str):
Required. The resource name of the EntityType to create the
batch of Features under. Format:
``projects/{project}/locations/{location}/featurestores/{featurestore}/entityTypes/{entity_type}``
requests (Sequence[google.cloud.aiplatform_v1.types.CreateFeatureRequest]):
Required. The request message specifying the Features to
create. All Features must be created under the same parent
EntityType. The ``parent`` field in each child request
message can be omitted. If ``parent`` is set in a child
request, then the value must match the ``parent`` value in
this request message.
"""
parent = proto.Field(proto.STRING, number=1,)
requests = proto.RepeatedField(
proto.MESSAGE, number=2, message="CreateFeatureRequest",
)
class BatchCreateFeaturesResponse(proto.Message):
r"""Response message for
[FeaturestoreService.BatchCreateFeatures][google.cloud.aiplatform.v1.FeaturestoreService.BatchCreateFeatures].
Attributes:
features (Sequence[google.cloud.aiplatform_v1.types.Feature]):
The Features created.
"""
features = proto.RepeatedField(
proto.MESSAGE, number=1, message=gca_feature.Feature,
)
class GetFeatureRequest(proto.Message):
r"""Request message for
[FeaturestoreService.GetFeature][google.cloud.aiplatform.v1.FeaturestoreService.GetFeature].
Attributes:
name (str):
Required. The name of the Feature resource. Format:
``projects/{project}/locations/{location}/featurestores/{featurestore}/entityTypes/{entity_type}/features/{feature}``
"""
name = proto.Field(proto.STRING, number=1,)
class ListFeaturesRequest(proto.Message):
r"""Request message for
[FeaturestoreService.ListFeatures][google.cloud.aiplatform.v1.FeaturestoreService.ListFeatures].
Attributes:
parent (str):
Required. The resource name of the EntityType to list
Features. Format:
``projects/{project}/locations/{location}/featurestores/{featurestore}/entityTypes/{entity_type}``
filter (str):
Lists the Features that match the filter expression. The
following filters are supported:
- ``value_type``: Supports = and != comparisons.
- ``create_time``: Supports =, !=, <, >, >=, and <=
comparisons. Values must be in RFC 3339 format.
- ``update_time``: Supports =, !=, <, >, >=, and <=
comparisons. Values must be in RFC 3339 format.
- ``labels``: Supports key-value equality as well as key
presence.
Examples:
- ``value_type = DOUBLE`` --> Features whose type is
DOUBLE.
- ``create_time > \"2020-01-31T15:30:00.000000Z\" OR update_time > \"2020-01-31T15:30:00.000000Z\"``
--> EntityTypes created or updated after
2020-01-31T15:30:00.000000Z.
- ``labels.active = yes AND labels.env = prod`` -->
Features having both (active: yes) and (env: prod)
labels.
- ``labels.env: *`` --> Any Feature which has a label with
'env' as the key.
page_size (int):
The maximum number of Features to return. The
service may return fewer than this value. If
unspecified, at most 1000 Features will be
returned. The maximum value is 1000; any value
greater than 1000 will be coerced to 1000.
page_token (str):
A page token, received from a previous
[FeaturestoreService.ListFeatures][google.cloud.aiplatform.v1.FeaturestoreService.ListFeatures]
call. Provide this to retrieve the subsequent page.
When paginating, all other parameters provided to
[FeaturestoreService.ListFeatures][google.cloud.aiplatform.v1.FeaturestoreService.ListFeatures]
must match the call that provided the page token.
order_by (str):
A comma-separated list of fields to order by, sorted in
ascending order. Use "desc" after a field name for
descending. Supported fields:
- ``feature_id``
- ``value_type``
- ``create_time``
- ``update_time``
read_mask (google.protobuf.field_mask_pb2.FieldMask):
Mask specifying which fields to read.
latest_stats_count (int):
If set, return the most recent
[ListFeaturesRequest.latest_stats_count][google.cloud.aiplatform.v1.ListFeaturesRequest.latest_stats_count]
of stats for each Feature in response. Valid value is [0,
10]. If the number of existing stats is less than
[ListFeaturesRequest.latest_stats_count][google.cloud.aiplatform.v1.ListFeaturesRequest.latest_stats_count],
all existing stats are returned.
"""
parent = proto.Field(proto.STRING, number=1,)
filter = proto.Field(proto.STRING, number=2,)
page_size = proto.Field(proto.INT32, number=3,)
page_token = proto.Field(proto.STRING, number=4,)
order_by = proto.Field(proto.STRING, number=5,)
read_mask = proto.Field(proto.MESSAGE, number=6, message=field_mask_pb2.FieldMask,)
latest_stats_count = proto.Field(proto.INT32, number=7,)
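# Illustrative sketch only: a request combining the filter, ordering and stats
# options documented above. The parent resource name and the helper name are
# assumptions made purely for documentation.
def _example_list_features_request():
    return ListFeaturesRequest(
        parent="projects/example-project/locations/us-central1/featurestores/example_featurestore/entityTypes/users",
        filter="value_type = DOUBLE",
        order_by="create_time desc",
        page_size=100,
        latest_stats_count=10,
    )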
class ListFeaturesResponse(proto.Message):
r"""Response message for
[FeaturestoreService.ListFeatures][google.cloud.aiplatform.v1.FeaturestoreService.ListFeatures].
Attributes:
features (Sequence[google.cloud.aiplatform_v1.types.Feature]):
The Features matching the request.
next_page_token (str):
A token, which can be sent as
[ListFeaturesRequest.page_token][google.cloud.aiplatform.v1.ListFeaturesRequest.page_token]
to retrieve the next page. If this field is omitted, there
are no subsequent pages.
"""
@property
def raw_page(self):
return self
features = proto.RepeatedField(
proto.MESSAGE, number=1, message=gca_feature.Feature,
)
next_page_token = proto.Field(proto.STRING, number=2,)
class SearchFeaturesRequest(proto.Message):
r"""Request message for
[FeaturestoreService.SearchFeatures][google.cloud.aiplatform.v1.FeaturestoreService.SearchFeatures].
Attributes:
location (str):
Required. The resource name of the Location to search
Features. Format:
``projects/{project}/locations/{location}``
query (str):
Query string that is a conjunction of field-restricted
queries and/or field-restricted filters. Field-restricted
queries and filters can be combined using ``AND`` to form a
conjunction.
A field query is in the form FIELD:QUERY. This implicitly
checks if QUERY exists as a substring within Feature's
FIELD. The QUERY and the FIELD are converted to a sequence
of words (i.e. tokens) for comparison. This is done by:
- Removing leading/trailing whitespace and tokenizing the
search value. Characters that are not one of alphanumeric
``[a-zA-Z0-9]``, underscore ``_``, or asterisk ``*`` are
treated as delimiters for tokens. ``*`` is treated as a
wildcard that matches characters within a token.
- Ignoring case.
- Prepending an asterisk to the first and appending an
asterisk to the last token in QUERY.
A QUERY must be either a singular token or a phrase. A
phrase is one or multiple words enclosed in double quotation
marks ("). With phrases, the order of the words is
important. Words in the phrase must match in order and
appear consecutively.
Supported FIELDs for field-restricted queries:
- ``feature_id``
- ``description``
- ``entity_type_id``
Examples:
- ``feature_id: foo`` --> Matches a Feature with ID
containing the substring ``foo`` (eg. ``foo``,
``foofeature``, ``barfoo``).
- ``feature_id: foo*feature`` --> Matches a Feature with ID
containing the substring ``foo*feature`` (eg.
``foobarfeature``).
- ``feature_id: foo AND description: bar`` --> Matches a
Feature with ID containing the substring ``foo`` and
description containing the substring ``bar``.
Besides field queries, the following exact-match filters are
supported. The exact-match filters do not support wildcards.
Unlike field-restricted queries, exact-match filters are
case-sensitive.
- ``feature_id``: Supports = comparisons.
- ``description``: Supports = comparisons. Multi-token
filters should be enclosed in quotes.
- ``entity_type_id``: Supports = comparisons.
- ``value_type``: Supports = and != comparisons.
- ``labels``: Supports key-value equality as well as key
presence.
- ``featurestore_id``: Supports = comparisons.
Examples:
- ``description = "foo bar"`` --> Any Feature with
description exactly equal to ``foo bar``
- ``value_type = DOUBLE`` --> Features whose type is
DOUBLE.
- ``labels.active = yes AND labels.env = prod`` -->
Features having both (active: yes) and (env: prod)
labels.
- ``labels.env: *`` --> Any Feature which has a label with
``env`` as the key.
page_size (int):
The maximum number of Features to return. The
service may return fewer than this value. If
unspecified, at most 100 Features will be
returned. The maximum value is 100; any value
greater than 100 will be coerced to 100.
page_token (str):
A page token, received from a previous
[FeaturestoreService.SearchFeatures][google.cloud.aiplatform.v1.FeaturestoreService.SearchFeatures]
call. Provide this to retrieve the subsequent page.
When paginating, all other parameters provided to
[FeaturestoreService.SearchFeatures][google.cloud.aiplatform.v1.FeaturestoreService.SearchFeatures],
except ``page_size``, must match the call that provided the
page token.
"""
location = proto.Field(proto.STRING, number=1,)
query = proto.Field(proto.STRING, number=3,)
page_size = proto.Field(proto.INT32, number=4,)
page_token = proto.Field(proto.STRING, number=5,)
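# Illustrative sketch only: a search request using the field-restricted query
# syntax documented above. The location and helper name are assumptions.
def _example_search_features_request():
    return SearchFeaturesRequest(
        location="projects/example-project/locations/us-central1",
        query="feature_id: foo AND description: bar",
        page_size=50,
    )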
class SearchFeaturesResponse(proto.Message):
r"""Response message for
[FeaturestoreService.SearchFeatures][google.cloud.aiplatform.v1.FeaturestoreService.SearchFeatures].
Attributes:
features (Sequence[google.cloud.aiplatform_v1.types.Feature]):
The Features matching the request.
Fields returned:
- ``name``
- ``description``
- ``labels``
- ``create_time``
- ``update_time``
next_page_token (str):
A token, which can be sent as
[SearchFeaturesRequest.page_token][google.cloud.aiplatform.v1.SearchFeaturesRequest.page_token]
to retrieve the next page. If this field is omitted, there
are no subsequent pages.
"""
@property
def raw_page(self):
return self
features = proto.RepeatedField(
proto.MESSAGE, number=1, message=gca_feature.Feature,
)
next_page_token = proto.Field(proto.STRING, number=2,)
class UpdateFeatureRequest(proto.Message):
r"""Request message for
[FeaturestoreService.UpdateFeature][google.cloud.aiplatform.v1.FeaturestoreService.UpdateFeature].
Attributes:
feature (google.cloud.aiplatform_v1.types.Feature):
Required. The Feature's ``name`` field is used to identify
the Feature to be updated. Format:
``projects/{project}/locations/{location}/featurestores/{featurestore}/entityTypes/{entity_type}/features/{feature}``
update_mask (google.protobuf.field_mask_pb2.FieldMask):
Field mask is used to specify the fields to be overwritten
in the Feature resource by the update. The fields specified
in the update_mask are relative to the resource, not the
full request. A field will be overwritten if it is in the
mask. If the user does not provide a mask then only the
non-empty fields present in the request will be overwritten.
Set the update_mask to ``*`` to override all fields.
Updatable fields:
- ``description``
- ``labels``
- ``monitoring_config.snapshot_analysis.disabled``
- ``monitoring_config.snapshot_analysis.monitoring_interval``
"""
feature = proto.Field(proto.MESSAGE, number=1, message=gca_feature.Feature,)
update_mask = proto.Field(
proto.MESSAGE, number=2, message=field_mask_pb2.FieldMask,
)
class DeleteFeatureRequest(proto.Message):
r"""Request message for
[FeaturestoreService.DeleteFeature][google.cloud.aiplatform.v1.FeaturestoreService.DeleteFeature].
Attributes:
name (str):
Required. The name of the Feature to be deleted. Format:
``projects/{project}/locations/{location}/featurestores/{featurestore}/entityTypes/{entity_type}/features/{feature}``
"""
name = proto.Field(proto.STRING, number=1,)
class CreateFeaturestoreOperationMetadata(proto.Message):
r"""Details of operations that perform create Featurestore.
Attributes:
generic_metadata (google.cloud.aiplatform_v1.types.GenericOperationMetadata):
Operation metadata for Featurestore.
"""
generic_metadata = proto.Field(
proto.MESSAGE, number=1, message=operation.GenericOperationMetadata,
)
class UpdateFeaturestoreOperationMetadata(proto.Message):
r"""Details of operations that perform update Featurestore.
Attributes:
generic_metadata (google.cloud.aiplatform_v1.types.GenericOperationMetadata):
Operation metadata for Featurestore.
"""
generic_metadata = proto.Field(
proto.MESSAGE, number=1, message=operation.GenericOperationMetadata,
)
class ImportFeatureValuesOperationMetadata(proto.Message):
r"""Details of operations that perform import Feature values.
Attributes:
generic_metadata (google.cloud.aiplatform_v1.types.GenericOperationMetadata):
Operation metadata for Featurestore import
Feature values.
imported_entity_count (int):
Number of entities that have been imported by
the operation.
imported_feature_value_count (int):
Number of Feature values that have been
imported by the operation.
invalid_row_count (int):
The number of rows in the input source that weren't imported due
to one of the following:
- Not having any featureValues.
- Having a null entityId.
- Having a null timestamp.
- Not being parsable (applicable for CSV sources).
"""
generic_metadata = proto.Field(
proto.MESSAGE, number=1, message=operation.GenericOperationMetadata,
)
imported_entity_count = proto.Field(proto.INT64, number=2,)
imported_feature_value_count = proto.Field(proto.INT64, number=3,)
invalid_row_count = proto.Field(proto.INT64, number=6,)
class ExportFeatureValuesOperationMetadata(proto.Message):
r"""Details of operations that exports Features values.
Attributes:
generic_metadata (google.cloud.aiplatform_v1.types.GenericOperationMetadata):
Operation metadata for Featurestore export
Feature values.
"""
generic_metadata = proto.Field(
proto.MESSAGE, number=1, message=operation.GenericOperationMetadata,
)
class BatchReadFeatureValuesOperationMetadata(proto.Message):
r"""Details of operations that batch reads Feature values.
Attributes:
generic_metadata (google.cloud.aiplatform_v1.types.GenericOperationMetadata):
Operation metadata for Featurestore batch
read Feature values.
"""
generic_metadata = proto.Field(
proto.MESSAGE, number=1, message=operation.GenericOperationMetadata,
)
class CreateEntityTypeOperationMetadata(proto.Message):
r"""Details of operations that perform create EntityType.
Attributes:
generic_metadata (google.cloud.aiplatform_v1.types.GenericOperationMetadata):
Operation metadata for EntityType.
"""
generic_metadata = proto.Field(
proto.MESSAGE, number=1, message=operation.GenericOperationMetadata,
)
class CreateFeatureOperationMetadata(proto.Message):
r"""Details of operations that perform create Feature.
Attributes:
generic_metadata (google.cloud.aiplatform_v1.types.GenericOperationMetadata):
Operation metadata for Feature.
"""
generic_metadata = proto.Field(
proto.MESSAGE, number=1, message=operation.GenericOperationMetadata,
)
class BatchCreateFeaturesOperationMetadata(proto.Message):
r"""Details of operations that perform batch create Features.
Attributes:
generic_metadata (google.cloud.aiplatform_v1.types.GenericOperationMetadata):
Operation metadata for Feature.
"""
generic_metadata = proto.Field(
proto.MESSAGE, number=1, message=operation.GenericOperationMetadata,
)
__all__ = tuple(sorted(__protobuf__.manifest))
| googleapis/python-aiplatform | google/cloud/aiplatform_v1/types/featurestore_service.py | Python | apache-2.0 | 56,061 |
# -*- coding: utf-8 -*-
__author__ = 'Ostico <[email protected]>'
import unittest
import os
os.environ['DEBUG'] = "1"
os.environ['DEBUG_VERBOSE'] = "0"
import pyorient
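# Integration tests for vertex/record creation and for deserialization of
# booleans, numbers, embedded maps and nested structures. They assume an
# OrientDB server reachable on localhost:2424 with root/root credentials
# (see setUp below); each test recreates an in-memory "test_tr" graph database.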
class CommandTestCase(unittest.TestCase):
def __init__(self, *args, **kwargs):
super(CommandTestCase, self).__init__(*args, **kwargs)
self.client = None
self.cluster_info = None
self.class_id1 = None
def setUp(self):
self.client = pyorient.OrientDB("localhost", 2424)
self.client.connect("root", "root")
db_name = "test_tr"
try:
self.client.db_drop(db_name)
except pyorient.PyOrientStorageException as e:
print(e)
finally:
db = self.client.db_create(db_name, pyorient.DB_TYPE_GRAPH,
pyorient.STORAGE_TYPE_MEMORY)
pass
self.cluster_info = self.client.db_open(
db_name, "root", "root", pyorient.DB_TYPE_GRAPH, ""
)
self.class_id1 = \
self.client.command("create class my_v_class extends V")[0]
def test_boolean(self):
rec = self.client.command('create vertex v content {"abcdef":false,'
'"qwerty":TRUE}')
assert rec[0].abcdef is not True, "abcdef expected False: '%s'" % rec[
0].abcdef
assert rec[0].qwerty is True, "qwerty expected True: '%s'" % rec[
0].qwerty
rec_value = self.client.query('select from v')
assert rec_value[0].abcdef is not True, "abcdef expected False: '%s'" % \
rec_value[0].abcdef
assert rec_value[0].qwerty is True, "qwerty expected True: '%s'" % \
rec_value[0].qwerty
def test_record_create_nonstrings(self):
# this should succeed with no exception
self.client.record_create(self.class_id1, {'@my_v_class': {'a': 1.5, 'b': 'foo'}})
def test_record_create_embedded_list(self):
# this should succeed with no exception
self.client.record_create(self.class_id1, {'@my_v_class': {'a': ['bar', 'bar']}})
def test_record_create_embedded_dictionary(self):
# this should succeed with no exception
self.client.record_create(self.class_id1, {'@my_v_class': {'a': [{'bar': 'bar'}]}})
def test_new_orient_dict(self):
rec = self.client.command('create vertex v content {"a":false,'
'"q":TRUE}')
assert rec[0].a is False
assert rec[0].q is True
import re
# this can differ between OrientDB versions, so I use a regular expression
assert re.match('[0-1]', str(rec[0]._version))
assert rec[0]._rid == '#10:0'
rec = {'a': 1, 'b': 2, 'c': 3}
rec_position = self.client.record_create(3, rec)
assert rec_position.a == 1
assert rec_position.b == 2
assert rec_position.c == 3
# this can differ between OrientDB versions, so I use a regular expression
assert re.match('[0-1]', str(rec_position._version))
assert rec_position._rid == '#3:0'
res = self.client.query("select from " + rec_position._rid)
assert res[0].a == 1
assert res[0].b == 2
assert res[0].c == 3
# this can differ between OrientDB versions, so I use a regular expression
assert re.match('[0-1]', str(res[0]._version))
assert res[0]._rid == '#3:0'
print(res[0].oRecordData['a'])
def test_embedded_map(self):
res = self.client.command(
'create vertex v content {"a":1,"b":{"d":"e"},"c":3}'
)
# print(res[0])
res = self.client.command(
'create vertex v content {"a":1,"b":{},"c":3}'
)
# print(res[0])
# print(res[0].oRecordData['b'])
assert res[0].oRecordData['b'] == {}, "Failed to assert that received " + \
repr(res[0].oRecordData['b']) + " equals '{}'"
res = self.client.command('create vertex v content {"a":1,"b":{}}')
# print(res[0])
assert res[0].oRecordData['b'] == {}, "Failed to assert that received " \
"" + repr(res[0].oRecordData['b']) + \
" equals '{}'"
res = self.client.command(
'create vertex v content {"b":{},"a":1,"d":{}}'
)
# print(res[0])
assert res[0].oRecordData['b'] == {}, "Failed to assert that received " \
"" + repr(res[0].oRecordData['b']) + \
" equals '{}'"
assert res[0].oRecordData['d'] == {}, "Failed to assert that received " \
"" + repr(res[0].oRecordData['d']) + \
" equals '{}'"
def test_nested_objects_1(self):
res = self.client.command(
'create vertex v content {"b":[[1]],"a":{},"d":[12],"c":["x"]}'
)
print(res[0])
def test_nested_objects_2(self):
res = self.client.command(
'create vertex v content {"b":[[1,"abc"]]}'
)
print(res[0])
assert res[0].oRecordData['b'][0][0] == 1
assert res[0].oRecordData['b'][0][1] == "abc"
def test_nested_objects_3(self):
res = self.client.command(
'create vertex v content {"b":[[1,{"abc":2}]]}'
)
print(res[0])
assert res[0].oRecordData['b'][0][0] == 1
assert res[0].oRecordData['b'][0][1]['abc'] == 2
def test_nested_objects_4(self):
res = self.client.command(
'create vertex v content {"b":[[1,{"abc":2}],[3,{"cde":4}]]}'
)
print(res[0])
assert res[0].oRecordData['b'][0][0] == 1
assert res[0].oRecordData['b'][0][1]['abc'] == 2
assert res[0].oRecordData['b'][1][0] == 3
assert res[0].oRecordData['b'][1][1]['cde'] == 4
def test_nested_objects_5(self):
res = self.client.command(
'create vertex v content '
'{"b":[[1,{"dx":[1,2]},"abc"]],"a":{},"d":[12],"c":["x"],"s":111}'
)
assert res[0].oRecordData['b'][0][0] == 1
assert res[0].oRecordData['b'][0][1]['dx'][0] == 1
assert res[0].oRecordData['b'][0][1]['dx'][1] == 2
assert res[0].oRecordData['b'][0][2] == "abc"
assert res[0].oRecordData['a'] == {}
assert res[0].oRecordData['d'][0] == 12
assert res[0].oRecordData['c'][0] == "x"
assert res[0].oRecordData['s'] == 111
print(res[0])
def test_nested_objects_6(self):
res = self.client.command(
'create vertex v content '
'{"b":[[1,2,"abc"]]}'
)
assert res[0].oRecordData['b'][0][0] == 1
assert res[0].oRecordData['b'][0][1] == 2
assert res[0].oRecordData['b'][0][2] == "abc"
print(res[0])
def test_nested_objects_7(self):
res = self.client.command(
'create vertex v content '
'{"b":[{"xx":{"xxx":[1,2,"abc"]}}]}'
)
assert isinstance(res[0].oRecordData['b'], list)
assert isinstance(res[0].oRecordData['b'][0], dict)
assert isinstance(res[0].oRecordData['b'][0]['xx'], dict)
assert isinstance(res[0].oRecordData['b'][0]['xx']['xxx'], list)
assert res[0].oRecordData['b'][0]['xx']['xxx'][0] == 1
assert res[0].oRecordData['b'][0]['xx']['xxx'][1] == 2
assert res[0].oRecordData['b'][0]['xx']['xxx'][2] == "abc"
print(res[0])
def test_nested_objects_8(self):
res = self.client.command(
'create vertex v content '
'{"b":[{"xx":{"xxx":[1,2,"abc"]}}],"c":[{"yy":{"yyy":[3,4,"cde"]}}]}'
)
assert isinstance(res[0].oRecordData['b'], list)
assert isinstance(res[0].oRecordData['b'][0], dict)
assert isinstance(res[0].oRecordData['b'][0]['xx'], dict)
assert isinstance(res[0].oRecordData['b'][0]['xx']['xxx'], list)
assert res[0].oRecordData['b'][0]['xx']['xxx'][0] == 1
assert res[0].oRecordData['b'][0]['xx']['xxx'][1] == 2
assert res[0].oRecordData['b'][0]['xx']['xxx'][2] == "abc"
assert isinstance(res[0].oRecordData['c'], list)
assert isinstance(res[0].oRecordData['c'][0], dict)
assert isinstance(res[0].oRecordData['c'][0]['yy'], dict)
assert isinstance(res[0].oRecordData['c'][0]['yy']['yyy'], list)
assert res[0].oRecordData['c'][0]['yy']['yyy'][0] == 3
assert res[0].oRecordData['c'][0]['yy']['yyy'][1] == 4
assert res[0].oRecordData['c'][0]['yy']['yyy'][2] == "cde"
print(res[0])
def test_nested_objects_9(self):
res = self.client.command(
'create vertex v content '
'{"a":[[1,2],[3,4],[5,6],null]}'
)
assert isinstance(res[0].oRecordData['a'], list)
assert isinstance(res[0].oRecordData['a'][0], list)
assert isinstance(res[0].oRecordData['a'][1], list)
assert isinstance(res[0].oRecordData['a'][2], list)
assert res[0].oRecordData['a'][0][0] == 1
assert res[0].oRecordData['a'][0][1] == 2
print(res[0])
def test_nested_objects_10(self):
res = self.client.command(
'create vertex v content '
'{"embedded_map":{"one":[1,2]}}'
)
assert isinstance(res[0].oRecordData['embedded_map'], dict)
assert isinstance(res[0].oRecordData['embedded_map']['one'], list)
assert res[0].oRecordData['embedded_map']['one'][0] == 1
assert res[0].oRecordData['embedded_map']['one'][1] == 2
print(res[0])
def test_nested_objects_11(self):
res = self.client.command(
'create vertex v content '
'{"embedded_map":{"one":{"three":4}}}'
)
assert isinstance(res[0].oRecordData['embedded_map'], dict)
assert isinstance(res[0].oRecordData['embedded_map']['one'], dict)
assert res[0].oRecordData['embedded_map']['one']["three"] == 4
print(res[0])
def test_nested_objects_12(self):
res = self.client.command(
'create vertex v content '
'{"embedded_map":{"one":2}}'
)
assert isinstance(res[0].oRecordData['embedded_map'], dict)
assert res[0].oRecordData['embedded_map']['one'] == 2
print(res[0])
def test_nested_objects_13(self):
res = self.client.command(
'create vertex v content '
'{"a":1,"b":{},"c":3}'
)
assert res[0].oRecordData['a'] == 1
assert isinstance(res[0].oRecordData['b'], dict)
assert len(res[0].oRecordData['b']) == 0
assert res[0].oRecordData['c'] == 3
print(res[0])
def test_quotes(self):
import json
test_data = {'scenario': 'a "quote" follows'}
record = self.client.command("CREATE VERTEX V CONTENT " +
json.dumps(test_data))[0]
assert record._rid == '#10:0'
assert record.oRecordData['scenario'] == 'a "quote" follows'
def test_db_list(self):
self.client.connect("root", "root")
databases = self.client.db_list()
assert databases.oRecordData['databases']['GratefulDeadConcerts']
def test_datetime(self):
x = self.client.query(
"SELECT DATE('2015-01-02 03:04:05')"
)
x = x[0].oRecordData
import datetime
assert 'DATE' in x
assert isinstance(x['DATE'], datetime.datetime)
assert str(x['DATE']) == '2015-01-02 03:04:05'
def test_deserialize_numeric_types(self):
lon1 = self.client.command(
"CREATE VERTEX V CONTENT {'longitude': 1.1}")[0].longitude
lon2 = self.client.command(
"CREATE VERTEX V CONTENT {'longitude': -1.1}")[0].longitude
lon3 = self.client.command(
"CREATE VERTEX V CONTENT {'longNum': 5356336298435356336}"
)[0].longNum
lon4 = self.client.command(
"CREATE VERTEX V CONTENT {'sciNum': 6.022E23}"
)[0].sciNum
lon5 = self.client.command(
"CREATE VERTEX V CONTENT {'sciNum': 6.022E-23}"
)[0].sciNum
assert isinstance(lon1, float), \
"type(lon1) is not equal to 'float': %r" % type(lon1)
assert isinstance(lon2, float), \
"type(lon2) is not equal to 'float': %r" % type(lon2)
assert isinstance(lon4, float), \
"type(lon4) is not equal to 'float': %r" % type(lon4)
assert isinstance(lon5, float), \
"type(lon5) is not equal to 'float': %r" % type(lon5)
assert isinstance(lon3, int), \
"type(lon3) is not equal to 'int': %r" \
% type(lon3)
| orientechnologies/pyorient | tests/test_record_contents.py | Python | apache-2.0 | 12,906 |
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import print_function
from __future__ import unicode_literals
import os
from django_nyt import VERSION
from setuptools import setup, find_packages
# Utility function to read the README file.
# Used for the long_description. It's nice, because now 1) we have a top level
# README file and 2) it's easier to type in the README file than to put a raw
# string in below ...
def get_path(fname):
return os.path.join(os.path.dirname(os.path.abspath(__file__)), fname)
def read(fname):
return open(get_path(fname)).read()
packages = find_packages()
try:
import pypandoc
long_description = pypandoc.convert(get_path('README.md'), 'rst')
long_description = long_description.split(
'<!---Illegal PyPi RST data -->')[0]
f = open(get_path('README.rst'), 'w')
f.write(long_description)
f.close()
print("Successfully converted README.md to README.rst")
except (IOError, ImportError):
# No long description... but never mind, it's only for PyPI uploads.
long_description = ""
setup(
name="django-nyt",
version=VERSION,
author="Benjamin Bach",
author_email="[email protected]",
url="https://github.com/benjaoming/django-nyt",
description="A pluggable notification system written for the Django framework.",
license="Apache License 2.0",
keywords="django notification system",
packages=find_packages(exclude=["testproject", "testproject.*"]),
# long_description=long_description,
zip_safe=False,
install_requires=read('requirements.txt').split("\n"),
classifiers=[
'Development Status :: 3 - Alpha',
'License :: OSI Approved :: Apache Software License',
'Environment :: Web Environment',
'Framework :: Django',
'Intended Audience :: Developers',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Topic :: Internet :: WWW/HTTP :: Dynamic Content',
'Topic :: Software Development',
'Topic :: Software Development :: Libraries :: Application Frameworks',
],
include_package_data=True,
)
| spookylukey/django-nyt | setup.py | Python | apache-2.0 | 2,187 |
# May you do good and not evil
# May you find forgiveness for yourself and forgive others
# May you share freely, never taking more than you give. -- SQLite source code
#
# As we enjoy great advantages from the inventions of others, we should be glad
# of an opportunity to serve others by an invention of ours, and this we should
# do freely and generously. -- Ben Franklin
#
# (\
# ( \ /(o)\ caw!
# ( \/ ()/ /)
# ( `;.))'".)
# `(/////.-'
# =====))=))===()
# ///'
# //
# '
import datetime
import decimal
import hashlib
import logging
import operator
import re
import sys
import threading
import uuid
from collections import deque
from collections import namedtuple
try:
from collections import OrderedDict
except ImportError:
OrderedDict = dict
from copy import deepcopy
from functools import wraps
from inspect import isclass
__version__ = '2.4.7'
__all__ = [
'BareField',
'BigIntegerField',
'BlobField',
'BooleanField',
'CharField',
'Check',
'Clause',
'CompositeKey',
'DatabaseError',
'DataError',
'DateField',
'DateTimeField',
'DecimalField',
'DoesNotExist',
'DoubleField',
'DQ',
'Field',
'FloatField',
'fn',
'ForeignKeyField',
'ImproperlyConfigured',
'IntegerField',
'IntegrityError',
'InterfaceError',
'InternalError',
'JOIN_FULL',
'JOIN_INNER',
'JOIN_LEFT_OUTER',
'Model',
'MySQLDatabase',
'NotSupportedError',
'OperationalError',
'Param',
'PostgresqlDatabase',
'prefetch',
'PrimaryKeyField',
'ProgrammingError',
'Proxy',
'R',
'SqliteDatabase',
'SQL',
'TextField',
'TimeField',
'UUIDField',
'Window',
]
# Set a default logging handler to avoid "No handlers could be found for
# logger 'peewee'" warnings.
try: # Python 2.7+
from logging import NullHandler
except ImportError:
class NullHandler(logging.Handler):
def emit(self, record):
pass
# All peewee-generated logs are logged to this namespace.
logger = logging.getLogger('peewee')
logger.addHandler(NullHandler())
# Python 2/3 compatibility helpers. These helpers are used internally and are
# not exported.
def with_metaclass(meta, base=object):
return meta("NewBase", (base,), {})
PY2 = sys.version_info[0] == 2
PY3 = sys.version_info[0] == 3
if PY3:
import builtins
from collections import Callable
from functools import reduce
callable = lambda c: isinstance(c, Callable)
unicode_type = str
string_type = bytes
basestring = str
print_ = getattr(builtins, 'print')
binary_construct = lambda s: bytes(s.encode('raw_unicode_escape'))
def reraise(tp, value, tb=None):
if value.__traceback__ is not tb:
raise value.with_traceback(tb)
raise value
elif PY2:
unicode_type = unicode
string_type = basestring
binary_construct = buffer
def print_(s):
sys.stdout.write(s)
sys.stdout.write('\n')
exec('def reraise(tp, value, tb=None): raise tp, value, tb')
else:
raise RuntimeError('Unsupported python version.')
# By default, peewee supports Sqlite, MySQL and Postgresql.
try:
import sqlite3
except ImportError:
try:
from pysqlite2 import dbapi2 as sqlite3
except ImportError:
sqlite3 = None
try:
from psycopg2cffi import compat
compat.register()
except ImportError:
pass
try:
import psycopg2
from psycopg2 import extensions as pg_extensions
except ImportError:
psycopg2 = None
try:
import MySQLdb as mysql # prefer the C module.
except ImportError:
try:
import pymysql as mysql
except ImportError:
mysql = None
if sqlite3:
sqlite3.register_adapter(decimal.Decimal, str)
sqlite3.register_adapter(datetime.date, str)
sqlite3.register_adapter(datetime.time, str)
DATETIME_PARTS = ['year', 'month', 'day', 'hour', 'minute', 'second']
DATETIME_LOOKUPS = set(DATETIME_PARTS)
# Sqlite does not support the `date_part` SQL function, so we will define an
# implementation in python.
SQLITE_DATETIME_FORMATS = (
'%Y-%m-%d %H:%M:%S',
'%Y-%m-%d %H:%M:%S.%f',
'%Y-%m-%d',
'%H:%M:%S',
'%H:%M:%S.%f',
'%H:%M')
def _sqlite_date_part(lookup_type, datetime_string):
assert lookup_type in DATETIME_LOOKUPS
if not datetime_string:
return
dt = format_date_time(datetime_string, SQLITE_DATETIME_FORMATS)
return getattr(dt, lookup_type)
SQLITE_DATE_TRUNC_MAPPING = {
'year': '%Y',
'month': '%Y-%m',
'day': '%Y-%m-%d',
'hour': '%Y-%m-%d %H',
'minute': '%Y-%m-%d %H:%M',
'second': '%Y-%m-%d %H:%M:%S'}
MYSQL_DATE_TRUNC_MAPPING = SQLITE_DATE_TRUNC_MAPPING.copy()
MYSQL_DATE_TRUNC_MAPPING['minute'] = '%Y-%m-%d %H:%i'
MYSQL_DATE_TRUNC_MAPPING['second'] = '%Y-%m-%d %H:%i:%S'
def _sqlite_date_trunc(lookup_type, datetime_string):
assert lookup_type in SQLITE_DATE_TRUNC_MAPPING
if not datetime_string:
return
dt = format_date_time(datetime_string, SQLITE_DATETIME_FORMATS)
return dt.strftime(SQLITE_DATE_TRUNC_MAPPING[lookup_type])
def _sqlite_regexp(regex, value):
return re.search(regex, value, re.I) is not None
# Operators used in binary expressions.
OP_AND = 'and'
OP_OR = 'or'
OP_ADD = '+'
OP_SUB = '-'
OP_MUL = '*'
OP_DIV = '/'
OP_BIN_AND = '&'
OP_BIN_OR = '|'
OP_XOR = '^'
OP_MOD = '%'
OP_EQ = '='
OP_LT = '<'
OP_LTE = '<='
OP_GT = '>'
OP_GTE = '>='
OP_NE = '!='
OP_IN = 'in'
OP_NOT_IN = 'not in'
OP_IS = 'is'
OP_IS_NOT = 'is not'
OP_LIKE = 'like'
OP_ILIKE = 'ilike'
OP_BETWEEN = 'between'
OP_REGEXP = 'regexp'
OP_CONCAT = '||'
# To support "django-style" double-underscore filters, create a mapping between
# operation name and operation code, e.g. "__eq" == OP_EQ.
DJANGO_MAP = {
'eq': OP_EQ,
'lt': OP_LT,
'lte': OP_LTE,
'gt': OP_GT,
'gte': OP_GTE,
'ne': OP_NE,
'in': OP_IN,
'is': OP_IS,
'like': OP_LIKE,
'ilike': OP_ILIKE,
'regexp': OP_REGEXP,
}
JOIN_INNER = 'inner'
JOIN_LEFT_OUTER = 'left outer'
JOIN_RIGHT_OUTER = 'right outer'
JOIN_FULL = 'full'
# Helper functions that are used in various parts of the codebase.
def merge_dict(source, overrides):
merged = source.copy()
merged.update(overrides)
return merged
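# Convert an arbitrary (e.g. CamelCase) name into a lower-case,
# underscore-separated identifier.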
def pythonify_name(name):
name = re.sub('([a-z_])([A-Z][_a-z])', '\\1 \\2', name)
return re.sub('[^\w+]', '_', name.lower())
def returns_clone(func):
"""
Method decorator that will "clone" the object before applying the given
method. This ensures that state is mutated in a more predictable fashion,
and promotes the use of method-chaining.
"""
def inner(self, *args, **kwargs):
clone = self.clone() # Assumes object implements `clone`.
func(clone, *args, **kwargs)
return clone
inner.call_local = func # Provide a way to call without cloning.
return inner
def not_allowed(func):
"""
Method decorator to indicate a method is not allowed to be called. Will
raise a `NotImplementedError`.
"""
def inner(self, *args, **kwargs):
raise NotImplementedError('%s is not allowed on %s instances' % (
func, type(self).__name__))
return inner
class Proxy(object):
"""
Proxy class useful for situations when you wish to defer the initialization
of an object.
"""
__slots__ = ['obj', '_callbacks']
def __init__(self):
self._callbacks = []
self.initialize(None)
def initialize(self, obj):
self.obj = obj
for callback in self._callbacks:
callback(obj)
def attach_callback(self, callback):
self._callbacks.append(callback)
return callback
def __getattr__(self, attr):
if self.obj is None:
raise AttributeError('Cannot use uninitialized Proxy.')
return getattr(self.obj, attr)
def __setattr__(self, attr, value):
if attr not in self.__slots__:
raise AttributeError('Cannot set attribute on proxy.')
return super(Proxy, self).__setattr__(attr, value)
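# Typical usage sketch (illustrative of the deferred-initialization pattern this
# class supports; the names below are examples, not part of this module):
#
#     database_proxy = Proxy()
#
#     class BaseModel(Model):
#         class Meta:
#             database = database_proxy
#
#     # Later, once runtime configuration is known:
#     database_proxy.initialize(SqliteDatabase('my_app.db'))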
class _CDescriptor(object):
def __get__(self, instance, instance_type=None):
if instance is not None:
return Entity(instance._alias)
return self
# Classes representing the query tree.
class Node(object):
"""Base-class for any part of a query which shall be composable."""
c = _CDescriptor()
_node_type = 'node'
def __init__(self):
self._negated = False
self._alias = None
self._ordering = None # ASC or DESC.
def clone_base(self):
return type(self)()
def clone(self):
inst = self.clone_base()
inst._negated = self._negated
inst._alias = self._alias
inst._ordering = self._ordering
return inst
@returns_clone
def __invert__(self):
self._negated = not self._negated
@returns_clone
def alias(self, a=None):
self._alias = a
@returns_clone
def asc(self):
self._ordering = 'ASC'
@returns_clone
def desc(self):
self._ordering = 'DESC'
def _e(op, inv=False):
"""
Lightweight factory which returns a method that builds an Expression
consisting of the left-hand and right-hand operands, using `op`.
"""
def inner(self, rhs):
if inv:
return Expression(rhs, op, self)
return Expression(self, op, rhs)
return inner
__and__ = _e(OP_AND)
__or__ = _e(OP_OR)
__add__ = _e(OP_ADD)
__sub__ = _e(OP_SUB)
__mul__ = _e(OP_MUL)
__div__ = __truediv__ = _e(OP_DIV)
__xor__ = _e(OP_XOR)
__radd__ = _e(OP_ADD, inv=True)
__rsub__ = _e(OP_SUB, inv=True)
__rmul__ = _e(OP_MUL, inv=True)
__rdiv__ = __rtruediv__ = _e(OP_DIV, inv=True)
__rand__ = _e(OP_AND, inv=True)
__ror__ = _e(OP_OR, inv=True)
__rxor__ = _e(OP_XOR, inv=True)
def __eq__(self, rhs):
if rhs is None:
return Expression(self, OP_IS, None)
return Expression(self, OP_EQ, rhs)
def __ne__(self, rhs):
if rhs is None:
return Expression(self, OP_IS_NOT, None)
return Expression(self, OP_NE, rhs)
__lt__ = _e(OP_LT)
__le__ = _e(OP_LTE)
__gt__ = _e(OP_GT)
__ge__ = _e(OP_GTE)
__lshift__ = _e(OP_IN)
__rshift__ = _e(OP_IS)
__mod__ = _e(OP_LIKE)
__pow__ = _e(OP_ILIKE)
bin_and = _e(OP_BIN_AND)
bin_or = _e(OP_BIN_OR)
# Special expressions.
def in_(self, *rhs):
return Expression(self, OP_IN, rhs)
def not_in(self, *rhs):
return Expression(self, OP_NOT_IN, rhs)
def is_null(self, is_null=True):
if is_null:
return Expression(self, OP_IS, None)
return Expression(self, OP_IS_NOT, None)
def contains(self, rhs):
return Expression(self, OP_ILIKE, '%%%s%%' % rhs)
def startswith(self, rhs):
return Expression(self, OP_ILIKE, '%s%%' % rhs)
def endswith(self, rhs):
return Expression(self, OP_ILIKE, '%%%s' % rhs)
def between(self, low, high):
return Expression(self, OP_BETWEEN, Clause(low, R('AND'), high))
def regexp(self, expression):
return Expression(self, OP_REGEXP, expression)
def concat(self, rhs):
return Expression(self, OP_CONCAT, rhs)
class Expression(Node):
"""A binary expression, e.g `foo + 1` or `bar < 7`."""
_node_type = 'expression'
def __init__(self, lhs, op, rhs, flat=False):
super(Expression, self).__init__()
self.lhs = lhs
self.op = op
self.rhs = rhs
self.flat = flat
def clone_base(self):
return Expression(self.lhs, self.op, self.rhs, self.flat)
class DQ(Node):
"""A "django-style" filter expression, e.g. {'foo__eq': 'x'}."""
def __init__(self, **query):
super(DQ, self).__init__()
self.query = query
def clone_base(self):
return DQ(**self.query)
class Param(Node):
"""
Arbitrary parameter passed into a query. Instructs the query compiler to
specifically treat this value as a parameter, useful for `list` which is
special-cased for `IN` lookups.
"""
_node_type = 'param'
def __init__(self, value, conv=None):
self.value = value
self.conv = conv
super(Param, self).__init__()
def clone_base(self):
return Param(self.value, self.conv)
class Passthrough(Param):
_node_type = 'passthrough'
class SQL(Node):
"""An unescaped SQL string, with optional parameters."""
_node_type = 'sql'
def __init__(self, value, *params):
self.value = value
self.params = params
super(SQL, self).__init__()
def clone_base(self):
return SQL(self.value, *self.params)
R = SQL # backwards-compat.
class Func(Node):
"""An arbitrary SQL function call."""
_node_type = 'func'
def __init__(self, name, *arguments):
self.name = name
self.arguments = arguments
self._coerce = True
super(Func, self).__init__()
@returns_clone
def coerce(self, coerce=True):
self._coerce = coerce
def clone_base(self):
res = Func(self.name, *self.arguments)
res._coerce = self._coerce
return res
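# Wrap this function call in an OVER (...) clause, either inlining a window
# definition built from partition_by/order_by or referencing a previously
# defined named window by its alias.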
def over(self, partition_by=None, order_by=None, window=None):
if isinstance(partition_by, Window) and window is None:
window = partition_by
if window is None:
sql = Window(
partition_by=partition_by, order_by=order_by).__sql__()
else:
sql = SQL(window._alias)
return Clause(self, SQL('OVER'), sql)
def __getattr__(self, attr):
def dec(*args, **kwargs):
return Func(attr, *args, **kwargs)
return dec
# fn is a factory for creating `Func` objects and supports a more friendly
# API. So instead of `Func("LOWER", param)`, `fn.LOWER(param)`.
fn = Func(None)
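# A window definition (PARTITION BY / ORDER BY) for SQL window functions, used
# together with Func.over(), e.g. fn.RANK().over(order_by=[SomeModel.score])
# (SomeModel here is an illustrative name).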
class Window(Node):
def __init__(self, partition_by=None, order_by=None):
super(Window, self).__init__()
self.partition_by = partition_by
self.order_by = order_by
self._alias = self._alias or 'w'
def __sql__(self):
over_clauses = []
if self.partition_by:
over_clauses.append(Clause(
SQL('PARTITION BY'),
CommaClause(*self.partition_by)))
if self.order_by:
over_clauses.append(Clause(
SQL('ORDER BY'),
CommaClause(*self.order_by)))
return EnclosedClause(Clause(*over_clauses))
def clone_base(self):
return Window(self.partition_by, self.order_by)
class Clause(Node):
"""A SQL clause, one or more Node objects joined by spaces."""
_node_type = 'clause'
glue = ' '
parens = False
def __init__(self, *nodes):
super(Clause, self).__init__()
self.nodes = list(nodes)
def clone_base(self):
clone = Clause(*self.nodes)
clone.glue = self.glue
clone.parens = self.parens
return clone
class CommaClause(Clause):
"""One or more Node objects joined by commas, no parens."""
glue = ', '
class EnclosedClause(CommaClause):
"""One or more Node objects joined by commas and enclosed in parens."""
parens = True
class Entity(Node):
"""A quoted-name or entity, e.g. "table"."column"."""
_node_type = 'entity'
def __init__(self, *path):
super(Entity, self).__init__()
self.path = path
def clone_base(self):
return Entity(*self.path)
def __getattr__(self, attr):
return Entity(*filter(None, self.path + (attr,)))
class Check(SQL):
"""Check constraint, usage: `Check('price > 10')`."""
def __init__(self, value):
super(Check, self).__init__('CHECK (%s)' % value)
class _StripParens(Node):
_node_type = 'strip_parens'
def __init__(self, node):
super(_StripParens, self).__init__()
self.node = node
JoinMetadata = namedtuple('JoinMetadata', (
'source', 'target_attr', 'dest', 'to_field', 'related_name'))
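# A single JOIN in a query: the destination model/table, the join type and an
# optional ON expression. join_metadata() resolves which attribute on the source
# model the joined rows should be attached to.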
class Join(namedtuple('_Join', ('dest', 'join_type', 'on'))):
def get_foreign_key(self, source, dest):
fk_field = source._meta.rel_for_model(dest)
if fk_field is not None:
return fk_field, False
reverse_rel = source._meta.reverse_rel_for_model(dest)
if reverse_rel is not None:
return reverse_rel, True
return None, None
def join_metadata(self, source):
is_model_alias = isinstance(self.dest, ModelAlias)
if is_model_alias:
dest = self.dest.model_class
else:
dest = self.dest
is_expr = isinstance(self.on, Expression)
join_alias = is_expr and self.on._alias or None
target_attr = to_field = related_name = None
fk_field, is_backref = self.get_foreign_key(source, dest)
if fk_field is not None:
if is_backref:
target_attr = dest._meta.db_table
related_name = fk_field.related_name
else:
target_attr = fk_field.name
to_field = fk_field.to_field.name
elif is_expr and hasattr(self.on.lhs, 'name'):
target_attr = self.on.lhs.name
else:
target_attr = dest._meta.db_table
return JoinMetadata(
source,
join_alias or target_attr,
self.dest,
to_field,
related_name)
class FieldDescriptor(object):
# Fields are exposed as descriptors in order to control access to the
# underlying "raw" data.
def __init__(self, field):
self.field = field
self.att_name = self.field.name
def __get__(self, instance, instance_type=None):
if instance is not None:
return instance._data.get(self.att_name)
return self.field
def __set__(self, instance, value):
instance._data[self.att_name] = value
instance._dirty.add(self.att_name)
class Field(Node):
"""A column on a table."""
_field_counter = 0
_order = 0
_node_type = 'field'
db_field = 'unknown'
def __init__(self, null=False, index=False, unique=False,
verbose_name=None, help_text=None, db_column=None,
default=None, choices=None, primary_key=False, sequence=None,
constraints=None, schema=None):
self.null = null
self.index = index
self.unique = unique
self.verbose_name = verbose_name
self.help_text = help_text
self.db_column = db_column
self.default = default
self.choices = choices # Used for metadata purposes, not enforced.
self.primary_key = primary_key
self.sequence = sequence # Name of sequence, e.g. foo_id_seq.
self.constraints = constraints # List of column constraints.
self.schema = schema # Name of schema, e.g. 'public'.
# Used internally for recovering the order in which Fields were defined
# on the Model class.
Field._field_counter += 1
self._order = Field._field_counter
self._sort_key = (self.primary_key and 1 or 2), self._order
self._is_bound = False # Whether the Field is "bound" to a Model.
super(Field, self).__init__()
def clone_base(self, **kwargs):
inst = type(self)(
null=self.null,
index=self.index,
unique=self.unique,
verbose_name=self.verbose_name,
help_text=self.help_text,
db_column=self.db_column,
default=self.default,
choices=self.choices,
primary_key=self.primary_key,
sequence=self.sequence,
constraints=self.constraints,
schema=self.schema,
**kwargs)
if self._is_bound:
inst.name = self.name
inst.model_class = self.model_class
inst._is_bound = self._is_bound
return inst
def add_to_class(self, model_class, name):
"""
Hook that replaces the `Field` attribute on a class with a named
`FieldDescriptor`. Called by the metaclass during construction of the
`Model`.
"""
self.name = name
self.model_class = model_class
self.db_column = self.db_column or self.name
if not self.verbose_name:
self.verbose_name = re.sub('_+', ' ', name).title()
model_class._meta.fields[self.name] = self
model_class._meta.columns[self.db_column] = self
setattr(model_class, name, FieldDescriptor(self))
self._is_bound = True
def get_database(self):
return self.model_class._meta.database
def get_column_type(self):
field_type = self.get_db_field()
return self.get_database().compiler().get_column_type(field_type)
def get_db_field(self):
return self.db_field
def get_modifiers(self):
return None
def coerce(self, value):
return value
def db_value(self, value):
"""Convert the python value for storage in the database."""
return value if value is None else self.coerce(value)
def python_value(self, value):
"""Convert the database value to a pythonic value."""
return value if value is None else self.coerce(value)
def _as_entity(self, with_table=False):
if with_table:
return Entity(self.model_class._meta.db_table, self.db_column)
return Entity(self.db_column)
def __ddl_column__(self, column_type):
"""Return the column type, e.g. VARCHAR(255) or REAL."""
modifiers = self.get_modifiers()
if modifiers:
return SQL(
'%s(%s)' % (column_type, ', '.join(map(str, modifiers))))
return SQL(column_type)
def __ddl__(self, column_type):
"""Return a list of Node instances that defines the column."""
ddl = [self._as_entity(), self.__ddl_column__(column_type)]
if not self.null:
ddl.append(SQL('NOT NULL'))
if self.primary_key:
ddl.append(SQL('PRIMARY KEY'))
if self.sequence:
ddl.append(SQL("DEFAULT NEXTVAL('%s')" % self.sequence))
if self.constraints:
ddl.extend(self.constraints)
return ddl
def __hash__(self):
return hash(self.name + '.' + self.model_class.__name__)
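# An untyped column; the database decides how the value is stored (primarily
# useful with Sqlite, which allows columns without a declared type).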
class BareField(Field):
db_field = 'bare'
class IntegerField(Field):
db_field = 'int'
coerce = int
class BigIntegerField(IntegerField):
db_field = 'bigint'
class PrimaryKeyField(IntegerField):
db_field = 'primary_key'
def __init__(self, *args, **kwargs):
kwargs['primary_key'] = True
super(PrimaryKeyField, self).__init__(*args, **kwargs)
class FloatField(Field):
db_field = 'float'
coerce = float
class DoubleField(FloatField):
db_field = 'double'
class DecimalField(Field):
db_field = 'decimal'
def __init__(self, max_digits=10, decimal_places=5, auto_round=False,
rounding=None, *args, **kwargs):
self.max_digits = max_digits
self.decimal_places = decimal_places
self.auto_round = auto_round
self.rounding = rounding or decimal.DefaultContext.rounding
super(DecimalField, self).__init__(*args, **kwargs)
def clone_base(self, **kwargs):
return super(DecimalField, self).clone_base(
max_digits=self.max_digits,
decimal_places=self.decimal_places,
auto_round=self.auto_round,
rounding=self.rounding,
**kwargs)
def get_modifiers(self):
return [self.max_digits, self.decimal_places]
def db_value(self, value):
D = decimal.Decimal
if not value:
return value if value is None else D(0)
if self.auto_round:
exp = D(10) ** (-self.decimal_places)
rounding = self.rounding
return D(str(value)).quantize(exp, rounding=rounding)
return value
def python_value(self, value):
if value is not None:
if isinstance(value, decimal.Decimal):
return value
return decimal.Decimal(str(value))
def coerce_to_unicode(s, encoding='utf-8'):
if isinstance(s, unicode_type):
return s
elif isinstance(s, string_type):
return s.decode(encoding)
return unicode_type(s)
class CharField(Field):
db_field = 'string'
def __init__(self, max_length=255, *args, **kwargs):
self.max_length = max_length
super(CharField, self).__init__(*args, **kwargs)
def clone_base(self, **kwargs):
return super(CharField, self).clone_base(
max_length=self.max_length,
**kwargs)
def get_modifiers(self):
return self.max_length and [self.max_length] or None
def coerce(self, value):
return coerce_to_unicode(value or '')
class TextField(Field):
db_field = 'text'
def coerce(self, value):
return coerce_to_unicode(value or '')
class BlobField(Field):
db_field = 'blob'
def db_value(self, value):
if isinstance(value, basestring):
return binary_construct(value)
return value
class UUIDField(Field):
db_field = 'uuid'
def db_value(self, value):
return None if value is None else str(value)
def python_value(self, value):
return None if value is None else uuid.UUID(value)
def format_date_time(value, formats, post_process=None):
post_process = post_process or (lambda x: x)
for fmt in formats:
try:
return post_process(datetime.datetime.strptime(value, fmt))
except ValueError:
pass
return value
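# Factory producing property getters that extract a single date part (year,
# month, ...) from a date/datetime field via Database.extract_date().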
def _date_part(date_part):
def dec(self):
return self.model_class._meta.database.extract_date(date_part, self)
return dec
class _BaseFormattedField(Field):
formats = None
def __init__(self, formats=None, *args, **kwargs):
if formats is not None:
self.formats = formats
super(_BaseFormattedField, self).__init__(*args, **kwargs)
def clone_base(self, **kwargs):
return super(_BaseFormattedField, self).clone_base(
formats=self.formats,
**kwargs)
class DateTimeField(_BaseFormattedField):
db_field = 'datetime'
formats = [
'%Y-%m-%d %H:%M:%S.%f',
'%Y-%m-%d %H:%M:%S',
'%Y-%m-%d',
]
def python_value(self, value):
if value and isinstance(value, basestring):
return format_date_time(value, self.formats)
return value
year = property(_date_part('year'))
month = property(_date_part('month'))
day = property(_date_part('day'))
hour = property(_date_part('hour'))
minute = property(_date_part('minute'))
second = property(_date_part('second'))
class DateField(_BaseFormattedField):
db_field = 'date'
formats = [
'%Y-%m-%d',
'%Y-%m-%d %H:%M:%S',
'%Y-%m-%d %H:%M:%S.%f',
]
def python_value(self, value):
if value and isinstance(value, basestring):
pp = lambda x: x.date()
return format_date_time(value, self.formats, pp)
elif value and isinstance(value, datetime.datetime):
return value.date()
return value
year = property(_date_part('year'))
month = property(_date_part('month'))
day = property(_date_part('day'))
class TimeField(_BaseFormattedField):
db_field = 'time'
formats = [
'%H:%M:%S.%f',
'%H:%M:%S',
'%H:%M',
'%Y-%m-%d %H:%M:%S.%f',
'%Y-%m-%d %H:%M:%S',
]
def python_value(self, value):
if value and isinstance(value, basestring):
pp = lambda x: x.time()
return format_date_time(value, self.formats, pp)
elif value and isinstance(value, datetime.datetime):
return value.time()
return value
hour = property(_date_part('hour'))
minute = property(_date_part('minute'))
second = property(_date_part('second'))
class BooleanField(Field):
db_field = 'bool'
coerce = bool
class RelationDescriptor(FieldDescriptor):
"""Foreign-key abstraction to replace a related PK with a related model."""
def __init__(self, field, rel_model):
self.rel_model = rel_model
super(RelationDescriptor, self).__init__(field)
def get_object_or_id(self, instance):
rel_id = instance._data.get(self.att_name)
if rel_id is not None or self.att_name in instance._obj_cache:
if self.att_name not in instance._obj_cache:
obj = self.rel_model.get(self.field.to_field == rel_id)
instance._obj_cache[self.att_name] = obj
return instance._obj_cache[self.att_name]
elif not self.field.null:
raise self.rel_model.DoesNotExist
return rel_id
def __get__(self, instance, instance_type=None):
if instance is not None:
return self.get_object_or_id(instance)
return self.field
def __set__(self, instance, value):
if isinstance(value, self.rel_model):
instance._data[self.att_name] = getattr(
value, self.field.to_field.name)
instance._obj_cache[self.att_name] = value
else:
orig_value = instance._data.get(self.att_name)
instance._data[self.att_name] = value
if orig_value != value and self.att_name in instance._obj_cache:
del instance._obj_cache[self.att_name]
instance._dirty.add(self.att_name)
class ReverseRelationDescriptor(object):
"""Back-reference to expose related objects as a `SelectQuery`."""
def __init__(self, field):
self.field = field
self.rel_model = field.model_class
def __get__(self, instance, instance_type=None):
if instance is not None:
return self.rel_model.select().where(
self.field == getattr(instance, self.field.to_field.name))
return self
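# How the two descriptors cooperate, assuming hypothetical `User` and `Tweet`
# models where Tweet.user is a ForeignKeyField(User):
#
#     tweet.user       # RelationDescriptor: fetches the related User on first
#                      # access and caches it in _obj_cache.
#     user.tweet_set   # ReverseRelationDescriptor: a SelectQuery over Tweets
#                      # whose foreign key points back at `user`.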
class ForeignKeyField(IntegerField):
def __init__(self, rel_model, related_name=None, on_delete=None,
on_update=None, extra=None, to_field=None, *args, **kwargs):
if rel_model != 'self' and not isinstance(rel_model, Proxy) and not \
issubclass(rel_model, Model):
raise TypeError('Unexpected value for `rel_model`. Expected '
'`Model`, `Proxy` or "self"')
self.rel_model = rel_model
self._related_name = related_name
self.deferred = isinstance(rel_model, Proxy)
self.on_delete = on_delete
self.on_update = on_update
self.extra = extra
self.to_field = to_field
super(ForeignKeyField, self).__init__(*args, **kwargs)
def clone_base(self, **kwargs):
return super(ForeignKeyField, self).clone_base(
rel_model=self.rel_model,
related_name=self.related_name,
on_delete=self.on_delete,
on_update=self.on_update,
extra=self.extra,
to_field=self.to_field,
**kwargs)
def _get_descriptor(self):
return RelationDescriptor(self, self.rel_model)
def _get_backref_descriptor(self):
return ReverseRelationDescriptor(self)
def _get_related_name(self):
return self._related_name or ('%s_set' % self.model_class._meta.name)
def add_to_class(self, model_class, name):
if isinstance(self.rel_model, Proxy):
def callback(rel_model):
self.rel_model = rel_model
self.add_to_class(model_class, name)
self.rel_model.attach_callback(callback)
return
self.name = name
self.model_class = model_class
self.db_column = self.db_column or '%s_id' % self.name
if not self.verbose_name:
self.verbose_name = re.sub('_+', ' ', name).title()
model_class._meta.fields[self.name] = self
model_class._meta.columns[self.db_column] = self
self.related_name = self._get_related_name()
if self.rel_model == 'self':
self.rel_model = self.model_class
if self.to_field is not None:
if not isinstance(self.to_field, Field):
self.to_field = getattr(self.rel_model, self.to_field)
else:
self.to_field = self.rel_model._meta.primary_key
if model_class._meta.validate_backrefs:
if self.related_name in self.rel_model._meta.fields:
error = ('Foreign key: %s.%s related name "%s" collision with '
'model field of the same name.')
raise AttributeError(error % (
self.model_class._meta.name, self.name, self.related_name))
if self.related_name in self.rel_model._meta.reverse_rel:
error = ('Foreign key: %s.%s related name "%s" collision with '
'foreign key using same related_name.')
raise AttributeError(error % (
self.model_class._meta.name, self.name, self.related_name))
setattr(model_class, name, self._get_descriptor())
setattr(self.rel_model,
self.related_name,
self._get_backref_descriptor())
self._is_bound = True
model_class._meta.rel[self.name] = self
self.rel_model._meta.reverse_rel[self.related_name] = self
def get_db_field(self):
"""
        Overridden to ensure foreign keys use the same column type as the
        primary key they reference.
"""
if not isinstance(self.to_field, PrimaryKeyField):
return self.to_field.get_db_field()
return super(ForeignKeyField, self).get_db_field()
def get_modifiers(self):
if not isinstance(self.to_field, PrimaryKeyField):
return self.to_field.get_modifiers()
return super(ForeignKeyField, self).get_modifiers()
def coerce(self, value):
return self.to_field.coerce(value)
def db_value(self, value):
if isinstance(value, self.rel_model):
value = value._get_pk_value()
return self.to_field.db_value(value)
class CompositeKey(object):
"""A primary key composed of multiple columns."""
sequence = None
def __init__(self, *field_names):
self.field_names = field_names
def add_to_class(self, model_class, name):
self.name = name
self.model_class = model_class
setattr(model_class, name, self)
def __get__(self, instance, instance_type=None):
if instance is not None:
return tuple([getattr(instance, field_name)
for field_name in self.field_names])
return self
def __set__(self, instance, value):
pass
def __eq__(self, other):
expressions = [(self.model_class._meta.fields[field] == value)
for field, value in zip(self.field_names, other)]
return reduce(operator.and_, expressions)
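# Declaration sketch for a composite primary key on a hypothetical
# many-to-many "through" model:
#
#     class UserToGroup(Model):
#         user = ForeignKeyField(User)
#         group = ForeignKeyField(Group)
#
#         class Meta:
#             primary_key = CompositeKey('user', 'group')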
class AliasMap(object):
prefix = 't'
def __init__(self):
self._alias_map = {}
self._counter = 0
def __repr__(self):
return '<AliasMap: %s>' % self._alias_map
def add(self, obj, alias=None):
if obj in self._alias_map:
return
self._counter += 1
self._alias_map[obj] = alias or '%s%s' % (self.prefix, self._counter)
def __getitem__(self, obj):
if obj not in self._alias_map:
self.add(obj)
return self._alias_map[obj]
def __contains__(self, obj):
return obj in self._alias_map
def update(self, alias_map):
if alias_map:
for obj, alias in alias_map._alias_map.items():
if obj not in self:
self._alias_map[obj] = alias
return self
class QueryCompiler(object):
# Mapping of `db_type` to actual column type used by database driver.
# Database classes may provide additional column types or overrides.
field_map = {
'bare': '',
'bigint': 'BIGINT',
'blob': 'BLOB',
'bool': 'SMALLINT',
'date': 'DATE',
'datetime': 'DATETIME',
'decimal': 'DECIMAL',
'double': 'REAL',
'float': 'REAL',
'int': 'INTEGER',
'primary_key': 'INTEGER',
'string': 'VARCHAR',
'text': 'TEXT',
'time': 'TIME',
}
# Mapping of OP_ to actual SQL operation. For most databases this will be
# the same, but some column types or databases may support additional ops.
# Like `field_map`, Database classes may extend or override these.
op_map = {
OP_EQ: '=',
OP_LT: '<',
OP_LTE: '<=',
OP_GT: '>',
OP_GTE: '>=',
OP_NE: '!=',
OP_IN: 'IN',
OP_NOT_IN: 'NOT IN',
OP_IS: 'IS',
OP_IS_NOT: 'IS NOT',
OP_BIN_AND: '&',
OP_BIN_OR: '|',
OP_LIKE: 'LIKE',
OP_ILIKE: 'ILIKE',
OP_BETWEEN: 'BETWEEN',
OP_ADD: '+',
OP_SUB: '-',
OP_MUL: '*',
OP_DIV: '/',
OP_XOR: '#',
OP_AND: 'AND',
OP_OR: 'OR',
OP_MOD: '%',
OP_REGEXP: 'REGEXP',
OP_CONCAT: '||',
}
join_map = {
JOIN_INNER: 'INNER',
JOIN_LEFT_OUTER: 'LEFT OUTER',
JOIN_RIGHT_OUTER: 'RIGHT OUTER',
JOIN_FULL: 'FULL',
}
alias_map_class = AliasMap
def __init__(self, quote_char='"', interpolation='?', field_overrides=None,
op_overrides=None):
self.quote_char = quote_char
self.interpolation = interpolation
self._field_map = merge_dict(self.field_map, field_overrides or {})
self._op_map = merge_dict(self.op_map, op_overrides or {})
self._parse_map = self.get_parse_map()
self._unknown_types = set(['param'])
def get_parse_map(self):
        # To avoid O(n) lookups when parsing nodes, use an O(1) lookup table
        # keyed by node type for the common node types.
return {
'expression': self._parse_expression,
'param': self._parse_param,
'passthrough': self._parse_param,
'func': self._parse_func,
'clause': self._parse_clause,
'entity': self._parse_entity,
'field': self._parse_field,
'sql': self._parse_sql,
'select_query': self._parse_select_query,
'compound_select_query': self._parse_compound_select_query,
'strip_parens': self._parse_strip_parens,
}
def quote(self, s):
return '%s%s%s' % (self.quote_char, s, self.quote_char)
def get_column_type(self, f):
return self._field_map[f]
def get_op(self, q):
return self._op_map[q]
def _sorted_fields(self, field_dict):
return sorted(field_dict.items(), key=lambda i: i[0]._sort_key)
def _clean_extra_parens(self, s):
# Quick sanity check.
if not s or s[0] != '(':
return s
ct = i = 0
l = len(s)
while i < l:
if s[i] == '(' and s[l - 1] == ')':
ct += 1
i += 1
l -= 1
else:
break
if ct:
# If we ever end up with negatively-balanced parentheses, then we
# know that one of the outer parentheses was required.
unbalanced_ct = 0
required = 0
for i in range(ct, l - ct):
if s[i] == '(':
unbalanced_ct += 1
elif s[i] == ')':
unbalanced_ct -= 1
if unbalanced_ct < 0:
required += 1
unbalanced_ct = 0
if required == ct:
break
ct -= required
if ct > 0:
return s[ct:-ct]
return s
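    # For example, _clean_extra_parens('((t1.id = 1))') collapses to
    # 't1.id = 1', while '(a = 1) AND (b = 2)' is returned unchanged because
    # removing its outer parentheses would unbalance the expression.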
def _parse_default(self, node, alias_map, conv):
return self.interpolation, [node]
def _parse_expression(self, node, alias_map, conv):
if isinstance(node.lhs, Field):
conv = node.lhs
lhs, lparams = self.parse_node(node.lhs, alias_map, conv)
rhs, rparams = self.parse_node(node.rhs, alias_map, conv)
template = '%s %s %s' if node.flat else '(%s %s %s)'
sql = template % (lhs, self.get_op(node.op), rhs)
return sql, lparams + rparams
def _parse_param(self, node, alias_map, conv):
if node.conv:
params = [node.conv(node.value)]
else:
params = [node.value]
return self.interpolation, params
def _parse_func(self, node, alias_map, conv):
conv = node._coerce and conv or None
sql, params = self.parse_node_list(node.arguments, alias_map, conv)
return '%s(%s)' % (node.name, self._clean_extra_parens(sql)), params
def _parse_clause(self, node, alias_map, conv):
sql, params = self.parse_node_list(
node.nodes, alias_map, conv, node.glue)
if node.parens:
sql = '(%s)' % self._clean_extra_parens(sql)
return sql, params
def _parse_entity(self, node, alias_map, conv):
return '.'.join(map(self.quote, node.path)), []
def _parse_sql(self, node, alias_map, conv):
return node.value, list(node.params)
def _parse_field(self, node, alias_map, conv):
if alias_map:
sql = '.'.join((
self.quote(alias_map[node.model_class]),
self.quote(node.db_column)))
else:
sql = self.quote(node.db_column)
return sql, []
def _parse_compound_select_query(self, node, alias_map, conv):
l, lp = self.generate_select(node.lhs, alias_map)
r, rp = self.generate_select(node.rhs, alias_map)
sql = '(%s %s %s)' % (l, node.operator, r)
return sql, lp + rp
def _parse_select_query(self, node, alias_map, conv):
clone = node.clone()
if not node._explicit_selection:
if conv and isinstance(conv, ForeignKeyField):
select_field = conv.to_field
else:
select_field = clone.model_class._meta.primary_key
clone._select = (select_field,)
sub, params = self.generate_select(clone, alias_map)
return '(%s)' % self._clean_extra_parens(sub), params
def _parse_strip_parens(self, node, alias_map, conv):
sql, params = self.parse_node(node.node, alias_map, conv)
return self._clean_extra_parens(sql), params
def _parse(self, node, alias_map, conv):
# By default treat the incoming node as a raw value that should be
# parameterized.
node_type = getattr(node, '_node_type', None)
unknown = False
if node_type in self._parse_map:
sql, params = self._parse_map[node_type](node, alias_map, conv)
unknown = node_type in self._unknown_types
elif isinstance(node, (list, tuple)):
# If you're wondering how to pass a list into your query, simply
# wrap it in Param().
sql, params = self.parse_node_list(node, alias_map, conv)
sql = '(%s)' % sql
elif isinstance(node, Model):
sql = self.interpolation
if conv and isinstance(conv, ForeignKeyField):
params = [
conv.to_field.db_value(getattr(node, conv.to_field.name))]
else:
params = [node._get_pk_value()]
elif (isclass(node) and issubclass(node, Model)) or \
isinstance(node, ModelAlias):
entity = node._as_entity().alias(alias_map[node])
sql, params = self.parse_node(entity, alias_map, conv)
else:
sql, params = self._parse_default(node, alias_map, conv)
unknown = True
return sql, params, unknown
def parse_node(self, node, alias_map=None, conv=None):
sql, params, unknown = self._parse(node, alias_map, conv)
if unknown and conv and params:
params = [conv.db_value(i) for i in params]
if isinstance(node, Node):
if node._negated:
sql = 'NOT %s' % sql
if node._alias:
sql = ' '.join((sql, 'AS', node._alias))
if node._ordering:
sql = ' '.join((sql, node._ordering))
return sql, params
def parse_node_list(self, nodes, alias_map, conv=None, glue=', '):
sql = []
params = []
for node in nodes:
node_sql, node_params = self.parse_node(node, alias_map, conv)
sql.append(node_sql)
params.extend(node_params)
return glue.join(sql), params
def calculate_alias_map(self, query, alias_map=None):
new_map = self.alias_map_class()
if alias_map is not None:
new_map._counter = alias_map._counter
new_map.add(query.model_class, query.model_class._meta.table_alias)
for src_model, joined_models in query._joins.items():
new_map.add(src_model, src_model._meta.table_alias)
for join_obj in joined_models:
if isinstance(join_obj.dest, Node):
new_map.add(join_obj.dest, join_obj.dest.alias)
else:
new_map.add(join_obj.dest, join_obj.dest._meta.table_alias)
return new_map.update(alias_map)
def build_query(self, clauses, alias_map=None):
return self.parse_node(Clause(*clauses), alias_map)
def generate_joins(self, joins, model_class, alias_map):
        # Joins are implemented as an adjacency-list graph. Perform a
# depth-first search of the graph to generate all the necessary JOINs.
clauses = []
seen = set()
q = [model_class]
while q:
curr = q.pop()
if curr not in joins or curr in seen:
continue
seen.add(curr)
for join in joins[curr]:
src = curr
dest = join.dest
if isinstance(join.on, Expression):
# Clear any alias on the join expression.
constraint = join.on.clone().alias()
else:
field = src._meta.rel_for_model(dest, join.on)
if field:
left_field = field
right_field = field.to_field
else:
field = dest._meta.rel_for_model(src, join.on)
left_field = field.to_field
right_field = field
constraint = (left_field == right_field)
if isinstance(dest, Node):
# TODO: ensure alias?
dest_n = dest
else:
q.append(dest)
dest_n = dest._as_entity().alias(alias_map[dest])
join_type = self.join_map[join.join_type or JOIN_INNER]
join_stmt = SQL('%s JOIN' % (join_type))
clauses.append(
Clause(join_stmt, dest_n, SQL('ON'), constraint))
return clauses
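    # Sketch: for a hypothetical User.select().join(Tweet).join(Comment), the
    # traversal above starts at User and emits one '<type> JOIN <table> ON
    # <constraint>' clause per reachable edge in the join graph.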
def generate_select(self, query, alias_map=None):
model = query.model_class
db = model._meta.database
alias_map = self.calculate_alias_map(query, alias_map)
if isinstance(query, CompoundSelect):
clauses = [_StripParens(query)]
else:
if not query._distinct:
clauses = [SQL('SELECT')]
else:
clauses = [SQL('SELECT DISTINCT')]
if query._distinct not in (True, False):
clauses += [SQL('ON'), EnclosedClause(*query._distinct)]
select_clause = Clause(*query._select)
select_clause.glue = ', '
clauses.extend((select_clause, SQL('FROM')))
if query._from is None:
clauses.append(model._as_entity().alias(alias_map[model]))
else:
clauses.append(CommaClause(*query._from))
if query._windows is not None:
clauses.append(SQL('WINDOW'))
clauses.append(CommaClause(*[
Clause(
SQL(window._alias),
SQL('AS'),
window.__sql__())
for window in query._windows]))
join_clauses = self.generate_joins(query._joins, model, alias_map)
if join_clauses:
clauses.extend(join_clauses)
if query._where is not None:
clauses.extend([SQL('WHERE'), query._where])
if query._group_by:
clauses.extend([SQL('GROUP BY'), CommaClause(*query._group_by)])
if query._having:
clauses.extend([SQL('HAVING'), query._having])
if query._order_by:
clauses.extend([SQL('ORDER BY'), CommaClause(*query._order_by)])
if query._limit or (query._offset and db.limit_max):
limit = query._limit or db.limit_max
clauses.append(SQL('LIMIT %s' % limit))
if query._offset:
clauses.append(SQL('OFFSET %s' % query._offset))
for_update, no_wait = query._for_update
if for_update:
stmt = 'FOR UPDATE NOWAIT' if no_wait else 'FOR UPDATE'
clauses.append(SQL(stmt))
return self.build_query(clauses, alias_map)
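    # Rough end-to-end example for a hypothetical `User` model with `id` and
    # `username` fields:
    #
    #     User.select().where(User.id > 3).limit(5)
    #
    # compiles to approximately:
    #
    #     SELECT "t1"."id", "t1"."username" FROM "user" AS t1
    #     WHERE ("t1"."id" > ?) LIMIT 5       -- params: [3]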
def generate_update(self, query):
model = query.model_class
alias_map = self.alias_map_class()
alias_map.add(model, model._meta.db_table)
clauses = [SQL('UPDATE'), model._as_entity(), SQL('SET')]
update = []
for field, value in self._sorted_fields(query._update):
if not isinstance(value, (Node, Model)):
value = Param(value, conv=field.db_value)
update.append(Expression(
field._as_entity(with_table=False),
OP_EQ,
value,
flat=True)) # No outer parens, no table alias.
clauses.append(CommaClause(*update))
if query._where:
clauses.extend([SQL('WHERE'), query._where])
return self.build_query(clauses, alias_map)
def _get_field_clause(self, fields):
return EnclosedClause(*[
field._as_entity(with_table=False) for field in fields])
def generate_insert(self, query):
model = query.model_class
alias_map = self.alias_map_class()
alias_map.add(model, model._meta.db_table)
statement = query._upsert and 'INSERT OR REPLACE INTO' or 'INSERT INTO'
clauses = [SQL(statement), model._as_entity()]
if query._query is not None:
# This INSERT query is of the form INSERT INTO ... SELECT FROM.
if query._fields:
clauses.append(self._get_field_clause(query._fields))
clauses.append(_StripParens(query._query))
elif query._rows is not None:
fields, value_clauses = [], []
have_fields = False
for row_dict in query._iter_rows():
if not have_fields:
fields = sorted(
row_dict.keys(), key=operator.attrgetter('_sort_key'))
have_fields = True
values = []
for field in fields:
value = row_dict[field]
if not isinstance(value, (Node, Model)):
value = Param(value, conv=field.db_value)
values.append(value)
value_clauses.append(EnclosedClause(*values))
if fields:
clauses.extend([
self._get_field_clause(fields),
SQL('VALUES'),
CommaClause(*value_clauses)])
return self.build_query(clauses, alias_map)
def generate_delete(self, query):
model = query.model_class
clauses = [SQL('DELETE FROM'), model._as_entity()]
if query._where:
clauses.extend([SQL('WHERE'), query._where])
return self.build_query(clauses)
def field_definition(self, field):
column_type = self.get_column_type(field.get_db_field())
ddl = field.__ddl__(column_type)
return Clause(*ddl)
def foreign_key_constraint(self, field):
ddl = [
SQL('FOREIGN KEY'),
EnclosedClause(field._as_entity()),
SQL('REFERENCES'),
field.rel_model._as_entity(),
EnclosedClause(field.to_field._as_entity())]
if field.on_delete:
ddl.append(SQL('ON DELETE %s' % field.on_delete))
if field.on_update:
ddl.append(SQL('ON UPDATE %s' % field.on_update))
return Clause(*ddl)
def return_parsed_node(function_name):
# TODO: treat all `generate_` functions as returning clauses, instead
# of SQL/params.
def inner(self, *args, **kwargs):
fn = getattr(self, function_name)
return self.parse_node(fn(*args, **kwargs))
return inner
def _create_foreign_key(self, model_class, field, constraint=None):
constraint = constraint or 'fk_%s_%s_refs_%s' % (
model_class._meta.db_table,
field.db_column,
field.rel_model._meta.db_table)
fk_clause = self.foreign_key_constraint(field)
return Clause(
SQL('ALTER TABLE'),
model_class._as_entity(),
SQL('ADD CONSTRAINT'),
Entity(constraint),
*fk_clause.nodes)
create_foreign_key = return_parsed_node('_create_foreign_key')
def _create_table(self, model_class, safe=False):
statement = 'CREATE TABLE IF NOT EXISTS' if safe else 'CREATE TABLE'
meta = model_class._meta
columns, constraints = [], []
if isinstance(meta.primary_key, CompositeKey):
pk_cols = [meta.fields[f]._as_entity()
for f in meta.primary_key.field_names]
constraints.append(Clause(
SQL('PRIMARY KEY'), EnclosedClause(*pk_cols)))
for field in meta.get_fields():
columns.append(self.field_definition(field))
if isinstance(field, ForeignKeyField) and not field.deferred:
constraints.append(self.foreign_key_constraint(field))
return Clause(
SQL(statement),
model_class._as_entity(),
EnclosedClause(*(columns + constraints)))
create_table = return_parsed_node('_create_table')
def _drop_table(self, model_class, fail_silently=False, cascade=False):
statement = 'DROP TABLE IF EXISTS' if fail_silently else 'DROP TABLE'
ddl = [SQL(statement), model_class._as_entity()]
if cascade:
ddl.append(SQL('CASCADE'))
return Clause(*ddl)
drop_table = return_parsed_node('_drop_table')
def index_name(self, table, columns):
index = '%s_%s' % (table, '_'.join(columns))
if len(index) > 64:
index_hash = hashlib.md5(index.encode('utf-8')).hexdigest()
index = '%s_%s' % (table, index_hash)
return index
def _create_index(self, model_class, fields, unique, *extra):
tbl_name = model_class._meta.db_table
statement = 'CREATE UNIQUE INDEX' if unique else 'CREATE INDEX'
index_name = self.index_name(tbl_name, [f.db_column for f in fields])
return Clause(
SQL(statement),
Entity(index_name),
SQL('ON'),
model_class._as_entity(),
EnclosedClause(*[field._as_entity() for field in fields]),
*extra)
create_index = return_parsed_node('_create_index')
def _create_sequence(self, sequence_name):
return Clause(SQL('CREATE SEQUENCE'), Entity(sequence_name))
create_sequence = return_parsed_node('_create_sequence')
def _drop_sequence(self, sequence_name):
return Clause(SQL('DROP SEQUENCE'), Entity(sequence_name))
drop_sequence = return_parsed_node('_drop_sequence')
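# DDL sketch: the create_*/drop_* helpers return (sql, params) pairs ready for
# Database.execute_sql(). For a hypothetical `User` model with a CharField
# `username`, compiler.create_table(User) yields something like:
#
#     ('CREATE TABLE "user" ("id" INTEGER NOT NULL PRIMARY KEY,
#       "username" VARCHAR(255) NOT NULL)', [])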
class QueryResultWrapper(object):
"""
Provides an iterator over the results of a raw Query, additionally doing
two things:
    - converts rows from the database into Python representations
- ensures that multiple iterations do not result in multiple queries
"""
def __init__(self, model, cursor, meta=None):
self.model = model
self.cursor = cursor
self.__ct = 0
self.__idx = 0
self._result_cache = []
self._populated = False
self._initialized = False
if meta is not None:
self.column_meta, self.join_meta = meta
else:
self.column_meta = self.join_meta = None
def __iter__(self):
self.__idx = 0
if not self._populated:
return self
else:
return iter(self._result_cache)
def process_row(self, row):
return row
def iterate(self):
row = self.cursor.fetchone()
if not row:
self._populated = True
if not getattr(self.cursor, 'name', None):
self.cursor.close()
raise StopIteration
elif not self._initialized:
self.initialize(self.cursor.description)
self._initialized = True
return self.process_row(row)
def iterator(self):
while True:
yield self.iterate()
def next(self):
if self.__idx < self.__ct:
inst = self._result_cache[self.__idx]
self.__idx += 1
return inst
obj = self.iterate()
self._result_cache.append(obj)
self.__ct += 1
self.__idx += 1
return obj
__next__ = next
def fill_cache(self, n=None):
n = n or float('Inf')
if n < 0:
raise ValueError('Negative values are not supported.')
self.__idx = self.__ct
while not self._populated and (n > self.__ct):
try:
self.next()
except StopIteration:
break
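# Caching sketch: the first pass over a QueryResultWrapper pulls rows from the
# cursor and appends them to _result_cache; once _populated is set, later
# iterations are served entirely from the cache. E.g. for a hypothetical model:
#
#     results = SomeModel.select().execute()
#     list(results)
#     list(results)    # second pass hits the cache, no new cursor activity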
class ExtQueryResultWrapper(QueryResultWrapper):
def initialize(self, description):
model = self.model
conv = []
identity = lambda x: x
for i in range(len(description)):
func = identity
column = description[i][0]
found = False
if self.column_meta is not None:
try:
select_column = self.column_meta[i]
except IndexError:
pass
else:
if isinstance(select_column, Field):
func = select_column.python_value
column = select_column._alias or select_column.name
found = True
elif (isinstance(select_column, Func) and
len(select_column.arguments) and
isinstance(select_column.arguments[0], Field)):
if select_column._coerce:
                            # Special-case handling for aggregations.
func = select_column.arguments[0].python_value
found = True
if not found and column in model._meta.columns:
field_obj = model._meta.columns[column]
column = field_obj.name
func = field_obj.python_value
conv.append((i, column, func))
self.conv = conv
class TuplesQueryResultWrapper(ExtQueryResultWrapper):
def process_row(self, row):
return tuple([self.conv[i][2](col) for i, col in enumerate(row)])
class NaiveQueryResultWrapper(ExtQueryResultWrapper):
def process_row(self, row):
instance = self.model()
for i, column, func in self.conv:
setattr(instance, column, func(row[i]))
instance._prepare_instance()
return instance
class DictQueryResultWrapper(ExtQueryResultWrapper):
def process_row(self, row):
res = {}
for i, column, func in self.conv:
res[column] = func(row[i])
return res
class ModelQueryResultWrapper(QueryResultWrapper):
def initialize(self, description):
self.column_map, model_set = self.generate_column_map()
self.join_list = self.generate_join_list(model_set)
def generate_column_map(self):
column_map = []
models = set([self.model])
for i, node in enumerate(self.column_meta):
attr = conv = None
if isinstance(node, Field):
if isinstance(node, FieldProxy):
key = node._model_alias
constructor = node.model
else:
key = constructor = node.model_class
attr = node.name
conv = node.python_value
else:
key = constructor = self.model
if isinstance(node, Expression) and node._alias:
attr = node._alias
column_map.append((key, constructor, attr, conv))
models.add(key)
return column_map, models
def generate_join_list(self, models):
join_list = []
joins = self.join_meta
stack = [self.model]
while stack:
current = stack.pop()
if current not in joins:
continue
for join in joins[current]:
if join.dest in models:
join_list.append(join.join_metadata(current))
stack.append(join.dest)
return join_list
def process_row(self, row):
collected = self.construct_instances(row)
instances = self.follow_joins(collected)
for i in instances:
i._prepare_instance()
return instances[0]
def construct_instances(self, row, keys=None):
collected_models = {}
for i, (key, constructor, attr, conv) in enumerate(self.column_map):
if keys is not None and key not in keys:
continue
value = row[i]
if key not in collected_models:
collected_models[key] = constructor()
instance = collected_models[key]
if attr is None:
attr = self.cursor.description[i][0]
if conv is not None:
value = conv(value)
setattr(instance, attr, value)
return collected_models
def follow_joins(self, collected):
prepared = [collected[self.model]]
for (lhs, attr, rhs, to_field, related_name) in self.join_list:
inst = collected[lhs]
joined_inst = collected[rhs]
# Can we populate a value on the joined instance using the current?
if to_field is not None and attr in inst._data:
if getattr(joined_inst, to_field) is None:
setattr(joined_inst, to_field, inst._data[attr])
setattr(inst, attr, joined_inst)
prepared.append(joined_inst)
return prepared
class AggregateQueryResultWrapper(ModelQueryResultWrapper):
def __init__(self, *args, **kwargs):
self._row = []
super(AggregateQueryResultWrapper, self).__init__(*args, **kwargs)
def initialize(self, description):
super(AggregateQueryResultWrapper, self).initialize(description)
# Collect the set of all models queried.
self.all_models = set()
for key, _, _, _ in self.column_map:
self.all_models.add(key)
# Prepare data structure for analyzing unique rows.
self.models_with_aggregate = set()
self.back_references = {}
for (src_model, _, dest_model, _, related_name) in self.join_list:
if related_name:
self.models_with_aggregate.add(src_model)
self.back_references[dest_model] = (src_model, related_name)
self.columns_to_compare = {}
for idx, (_, model_class, col_name, _) in enumerate(self.column_map):
if model_class in self.models_with_aggregate:
self.columns_to_compare.setdefault(model_class, [])
self.columns_to_compare[model_class].append((idx, col_name))
def read_model_data(self, row):
models = {}
for model_class, column_data in self.columns_to_compare.items():
models[model_class] = []
for idx, col_name in column_data:
models[model_class].append(row[idx])
return models
def iterate(self):
if self._row:
row = self._row.pop()
else:
row = self.cursor.fetchone()
if not row:
self._populated = True
if not getattr(self.cursor, 'name', None):
self.cursor.close()
raise StopIteration
elif not self._initialized:
self.initialize(self.cursor.description)
self._initialized = True
def _get_pk(instance):
if isinstance(instance._meta.primary_key, CompositeKey):
return tuple([
instance._data[field_name]
for field_name in instance._meta.primary_key.field_names])
return instance._get_pk_value()
identity_map = {}
_constructed = self.construct_instances(row)
primary_instance = _constructed[self.model]
for model_class, instance in _constructed.items():
identity_map[model_class] = OrderedDict()
identity_map[model_class][_get_pk(instance)] = instance
model_data = self.read_model_data(row)
while True:
cur_row = self.cursor.fetchone()
if cur_row is None:
break
duplicate_models = set()
cur_row_data = self.read_model_data(cur_row)
for model_class, data in cur_row_data.items():
if model_data[model_class] == data:
duplicate_models.add(model_class)
if not duplicate_models:
self._row.append(cur_row)
break
different_models = self.all_models - duplicate_models
new_instances = self.construct_instances(cur_row, different_models)
for model_class, instance in new_instances.items():
                # Do not include any instances that consist solely of NULL
                # values.
pk_value = _get_pk(instance)
if [val for val in instance._data.values() if val is not None]:
identity_map[model_class][pk_value] = instance
stack = [self.model]
instances = [primary_instance]
while stack:
current = stack.pop()
if current not in self.join_meta:
continue
for join in self.join_meta[current]:
foreign_key = current._meta.rel_for_model(join.dest, join.on)
if foreign_key:
if join.dest not in identity_map:
continue
for pk, instance in identity_map[current].items():
joined_inst = identity_map[join.dest][
instance._data[foreign_key.name]]
setattr(instance, foreign_key.name, joined_inst)
instances.append(joined_inst)
else:
if not isinstance(join.dest, Node):
backref = current._meta.reverse_rel_for_model(
join.dest, join.on)
if not backref:
continue
else:
continue
attr_name = backref.related_name
for instance in identity_map[current].values():
setattr(instance, attr_name, [])
if join.dest not in identity_map:
continue
for pk, instance in identity_map[join.dest].items():
if pk is None:
continue
try:
joined_inst = identity_map[current][
instance._data[backref.name]]
except KeyError:
continue
getattr(joined_inst, attr_name).append(instance)
instances.append(instance)
stack.append(join.dest)
for instance in instances:
instance._prepare_instance()
return primary_instance
class Query(Node):
"""Base class representing a database query on one or more tables."""
require_commit = True
def __init__(self, model_class):
super(Query, self).__init__()
self.model_class = model_class
self.database = model_class._meta.database
self._dirty = True
self._query_ctx = model_class
self._joins = {self.model_class: []} # Join graph as adjacency list.
self._where = None
def __repr__(self):
sql, params = self.sql()
return '%s %s %s' % (self.model_class, sql, params)
def clone(self):
query = type(self)(self.model_class)
query.database = self.database
return self._clone_attributes(query)
def _clone_attributes(self, query):
if self._where is not None:
query._where = self._where.clone()
query._joins = self._clone_joins()
query._query_ctx = self._query_ctx
return query
def _clone_joins(self):
return dict(
(mc, list(j)) for mc, j in self._joins.items())
def _add_query_clauses(self, initial, expressions, conjunction=None):
reduced = reduce(operator.and_, expressions)
if initial is None:
return reduced
conjunction = conjunction or operator.and_
return conjunction(initial, reduced)
@returns_clone
def where(self, *expressions):
self._where = self._add_query_clauses(self._where, expressions)
@returns_clone
def orwhere(self, *expressions):
self._where = self._add_query_clauses(
self._where, expressions, operator.or_)
@returns_clone
def join(self, dest, join_type=None, on=None):
if not on:
require_join_condition = [
isinstance(dest, SelectQuery),
(isclass(dest) and not self._query_ctx._meta.rel_exists(dest))]
if any(require_join_condition):
raise ValueError('A join condition must be specified.')
elif isinstance(on, basestring):
on = self._query_ctx._meta.fields[on]
self._joins.setdefault(self._query_ctx, [])
self._joins[self._query_ctx].append(Join(dest, join_type, on))
if not isinstance(dest, SelectQuery):
self._query_ctx = dest
@returns_clone
def switch(self, model_class=None):
"""Change or reset the query context."""
self._query_ctx = model_class or self.model_class
def ensure_join(self, lm, rm, on=None):
ctx = self._query_ctx
for join in self._joins.get(lm, []):
if join.dest == rm:
return self
return self.switch(lm).join(rm, on=on).switch(ctx)
def convert_dict_to_node(self, qdict):
accum = []
joins = []
relationship = (ForeignKeyField, ReverseRelationDescriptor)
for key, value in sorted(qdict.items()):
curr = self.model_class
if '__' in key and key.rsplit('__', 1)[1] in DJANGO_MAP:
key, op = key.rsplit('__', 1)
op = DJANGO_MAP[op]
else:
op = OP_EQ
for piece in key.split('__'):
model_attr = getattr(curr, piece)
if isinstance(model_attr, relationship):
curr = model_attr.rel_model
joins.append(model_attr)
accum.append(Expression(model_attr, op, value))
return accum, joins
def filter(self, *args, **kwargs):
# normalize args and kwargs into a new expression
dq_node = Node()
if args:
dq_node &= reduce(operator.and_, [a.clone() for a in args])
if kwargs:
dq_node &= DQ(**kwargs)
# dq_node should now be an Expression, lhs = Node(), rhs = ...
q = deque([dq_node])
dq_joins = set()
while q:
curr = q.popleft()
if not isinstance(curr, Expression):
continue
for side, piece in (('lhs', curr.lhs), ('rhs', curr.rhs)):
if isinstance(piece, DQ):
query, joins = self.convert_dict_to_node(piece.query)
dq_joins.update(joins)
expression = reduce(operator.and_, query)
# Apply values from the DQ object.
expression._negated = piece._negated
expression._alias = piece._alias
setattr(curr, side, expression)
else:
q.append(piece)
dq_node = dq_node.rhs
query = self.clone()
for field in dq_joins:
if isinstance(field, ForeignKeyField):
lm, rm = field.model_class, field.rel_model
field_obj = field
elif isinstance(field, ReverseRelationDescriptor):
lm, rm = field.field.rel_model, field.rel_model
field_obj = field.field
query = query.ensure_join(lm, rm, field_obj)
return query.where(dq_node)
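    # filter() accepts Django-style keyword lookups. Sketch, assuming
    # hypothetical `User`/`Tweet` models (Tweet.user is a foreign key to User)
    # and that the trailing operator is registered in DJANGO_MAP:
    #
    #     User.select().filter(username='charlie')        # username = 'charlie'
    #     User.select().filter(username__ilike='cha%')    # ILIKE lookup
    #     Tweet.select().filter(user__username='charlie') # joins Tweet -> User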
def compiler(self):
return self.database.compiler()
def sql(self):
raise NotImplementedError
def _execute(self):
sql, params = self.sql()
return self.database.execute_sql(sql, params, self.require_commit)
def execute(self):
raise NotImplementedError
def scalar(self, as_tuple=False, convert=False):
if convert:
row = self.tuples().first()
else:
row = self._execute().fetchone()
if row and not as_tuple:
return row[0]
else:
return row
class RawQuery(Query):
"""
    Execute a raw SQL query, exposing a standard iterable interface that
    yields model instances.
"""
def __init__(self, model, query, *params):
self._sql = query
self._params = list(params)
self._qr = None
self._tuples = False
self._dicts = False
super(RawQuery, self).__init__(model)
def clone(self):
query = RawQuery(self.model_class, self._sql, *self._params)
query._tuples = self._tuples
query._dicts = self._dicts
return query
join = not_allowed('joining')
where = not_allowed('where')
switch = not_allowed('switch')
@returns_clone
def tuples(self, tuples=True):
self._tuples = tuples
@returns_clone
def dicts(self, dicts=True):
self._dicts = dicts
def sql(self):
return self._sql, self._params
def execute(self):
if self._qr is None:
if self._tuples:
ResultWrapper = TuplesQueryResultWrapper
elif self._dicts:
ResultWrapper = DictQueryResultWrapper
else:
ResultWrapper = NaiveQueryResultWrapper
self._qr = ResultWrapper(self.model_class, self._execute(), None)
return self._qr
def __iter__(self):
return iter(self.execute())
class SelectQuery(Query):
_node_type = 'select_query'
def __init__(self, model_class, *selection):
super(SelectQuery, self).__init__(model_class)
self.require_commit = self.database.commit_select
self.__select(*selection)
self._from = None
self._group_by = None
self._having = None
self._order_by = None
self._windows = None
self._limit = None
self._offset = None
self._distinct = False
self._for_update = (False, False)
self._naive = False
self._tuples = False
self._dicts = False
self._aggregate_rows = False
self._alias = None
self._qr = None
def _clone_attributes(self, query):
query = super(SelectQuery, self)._clone_attributes(query)
query._explicit_selection = self._explicit_selection
query._select = list(self._select)
if self._from is not None:
query._from = []
for f in self._from:
if isinstance(f, Node):
query._from.append(f.clone())
else:
query._from.append(f)
if self._group_by is not None:
query._group_by = list(self._group_by)
if self._having:
query._having = self._having.clone()
if self._order_by is not None:
query._order_by = list(self._order_by)
if self._windows is not None:
query._windows = list(self._windows)
query._limit = self._limit
query._offset = self._offset
query._distinct = self._distinct
query._for_update = self._for_update
query._naive = self._naive
query._tuples = self._tuples
query._dicts = self._dicts
query._aggregate_rows = self._aggregate_rows
query._alias = self._alias
return query
def _model_shorthand(self, args):
accum = []
for arg in args:
if isinstance(arg, Node):
accum.append(arg)
elif isinstance(arg, Query):
accum.append(arg)
elif isinstance(arg, ModelAlias):
accum.extend(arg.get_proxy_fields())
elif isclass(arg) and issubclass(arg, Model):
accum.extend(arg._meta.get_fields())
return accum
def compound_op(operator):
def inner(self, other):
supported_ops = self.model_class._meta.database.compound_operations
if operator not in supported_ops:
raise ValueError(
'Your database does not support %s' % operator)
return CompoundSelect(self.model_class, self, operator, other)
return inner
_compound_op_static = staticmethod(compound_op)
__or__ = compound_op('UNION')
__and__ = compound_op('INTERSECT')
__sub__ = compound_op('EXCEPT')
def __xor__(self, rhs):
# Symmetric difference, should just be (self | rhs) - (self & rhs)...
wrapped_rhs = self.model_class.select(SQL('*')).from_(
EnclosedClause((self & rhs)).alias('_')).order_by()
return (self | rhs) - wrapped_rhs
def union_all(self, rhs):
return SelectQuery._compound_op_static('UNION ALL')(self, rhs)
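    # Compound-select sketch (q1 and q2 are hypothetical queries on the same
    # model):
    #
    #     q1 | q2             # UNION
    #     q1 & q2             # INTERSECT
    #     q1 - q2             # EXCEPT
    #     q1.union_all(q2)    # UNION ALL
    #
    # Each form raises ValueError if the operator is missing from the
    # database's compound_operations.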
def __select(self, *selection):
self._explicit_selection = len(selection) > 0
selection = selection or self.model_class._meta.get_fields()
self._select = self._model_shorthand(selection)
select = returns_clone(__select)
@returns_clone
def from_(self, *args):
self._from = None
if args:
self._from = list(args)
@returns_clone
def group_by(self, *args):
self._group_by = self._model_shorthand(args)
@returns_clone
def having(self, *expressions):
self._having = self._add_query_clauses(self._having, expressions)
@returns_clone
def order_by(self, *args):
self._order_by = list(args)
@returns_clone
def window(self, *windows):
self._windows = list(windows)
@returns_clone
def limit(self, lim):
self._limit = lim
@returns_clone
def offset(self, off):
self._offset = off
@returns_clone
def paginate(self, page, paginate_by=20):
if page > 0:
page -= 1
self._limit = paginate_by
self._offset = page * paginate_by
@returns_clone
def distinct(self, is_distinct=True):
self._distinct = is_distinct
@returns_clone
def for_update(self, for_update=True, nowait=False):
self._for_update = (for_update, nowait)
@returns_clone
def naive(self, naive=True):
self._naive = naive
@returns_clone
def tuples(self, tuples=True):
self._tuples = tuples
@returns_clone
def dicts(self, dicts=True):
self._dicts = dicts
@returns_clone
def aggregate_rows(self, aggregate_rows=True):
self._aggregate_rows = aggregate_rows
@returns_clone
def alias(self, alias=None):
self._alias = alias
def annotate(self, rel_model, annotation=None):
if annotation is None:
annotation = fn.Count(rel_model._meta.primary_key).alias('count')
query = self.clone()
query = query.ensure_join(query._query_ctx, rel_model)
if not query._group_by:
query._group_by = [x.alias() for x in query._select]
query._select = tuple(query._select) + (annotation,)
return query
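    # annotate() sketch, assuming hypothetical User/Tweet models where
    # Tweet.user is a foreign key to User:
    #
    #     User.select().annotate(Tweet)
    #
    # selects users joined to their tweets, grouped by the selected User
    # columns, with a `count` attribute holding COUNT of the Tweet primary key.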
def _aggregate(self, aggregation=None):
if aggregation is None:
aggregation = fn.Count(SQL('*'))
query = self.order_by()
query._select = [aggregation]
return query
def aggregate(self, aggregation=None, convert=True):
return self._aggregate(aggregation).scalar(convert=convert)
def count(self, clear_limit=False):
if self._distinct or self._group_by or self._limit or self._offset:
return self.wrapped_count(clear_limit=clear_limit)
# defaults to a count() of the primary key
return self.aggregate(convert=False) or 0
def wrapped_count(self, clear_limit=False):
clone = self.order_by()
if clear_limit:
clone._limit = clone._offset = None
sql, params = clone.sql()
wrapped = 'SELECT COUNT(1) FROM (%s) AS wrapped_select' % sql
rq = self.model_class.raw(wrapped, *params)
return rq.scalar() or 0
def exists(self):
clone = self.paginate(1, 1)
clone._select = [SQL('1')]
return bool(clone.scalar())
def get(self):
clone = self.paginate(1, 1)
try:
return clone.execute().next()
except StopIteration:
raise self.model_class.DoesNotExist(
'Instance matching query does not exist:\nSQL: %s\nPARAMS: %s'
% self.sql())
def first(self):
res = self.execute()
res.fill_cache(1)
try:
return res._result_cache[0]
except IndexError:
pass
def sql(self):
return self.compiler().generate_select(self)
def verify_naive(self):
model_class = self.model_class
for node in self._select:
if isinstance(node, Field) and node.model_class != model_class:
return False
return True
def get_query_meta(self):
return (self._select, self._joins)
def execute(self):
if self._dirty or not self._qr:
model_class = self.model_class
query_meta = self.get_query_meta()
if self._tuples:
ResultWrapper = TuplesQueryResultWrapper
elif self._dicts:
ResultWrapper = DictQueryResultWrapper
elif self._naive or not self._joins or self.verify_naive():
ResultWrapper = NaiveQueryResultWrapper
elif self._aggregate_rows:
ResultWrapper = AggregateQueryResultWrapper
else:
ResultWrapper = ModelQueryResultWrapper
self._qr = ResultWrapper(model_class, self._execute(), query_meta)
self._dirty = False
return self._qr
else:
return self._qr
def __iter__(self):
return iter(self.execute())
def iterator(self):
return iter(self.execute().iterator())
def __getitem__(self, value):
res = self.execute()
if isinstance(value, slice):
index = value.stop
else:
index = value
if index is not None and index >= 0:
index += 1
res.fill_cache(index)
return res._result_cache[value]
if PY3:
def __hash__(self):
return id(self)
class CompoundSelect(SelectQuery):
_node_type = 'compound_select_query'
def __init__(self, model_class, lhs=None, operator=None, rhs=None):
self.lhs = lhs
self.operator = operator
self.rhs = rhs
super(CompoundSelect, self).__init__(model_class, [])
def _clone_attributes(self, query):
query = super(CompoundSelect, self)._clone_attributes(query)
query.lhs = self.lhs
query.operator = self.operator
query.rhs = self.rhs
return query
def get_query_meta(self):
return self.lhs.get_query_meta()
class UpdateQuery(Query):
def __init__(self, model_class, update=None):
self._update = update
super(UpdateQuery, self).__init__(model_class)
def _clone_attributes(self, query):
query = super(UpdateQuery, self)._clone_attributes(query)
query._update = dict(self._update)
return query
join = not_allowed('joining')
def sql(self):
return self.compiler().generate_update(self)
def execute(self):
return self.database.rows_affected(self._execute())
class InsertQuery(Query):
def __init__(self, model_class, field_dict=None, rows=None,
fields=None, query=None):
super(InsertQuery, self).__init__(model_class)
self._upsert = False
self._is_multi_row_insert = rows is not None or query is not None
if rows is not None:
self._rows = rows
else:
self._rows = [field_dict or {}]
self._fields = fields
self._query = query
def _iter_rows(self):
model_meta = self.model_class._meta
valid_fields = (set(model_meta.fields.keys()) |
set(model_meta.fields.values()))
def validate_field(field):
if field not in valid_fields:
raise KeyError('"%s" is not a recognized field.' % field)
defaults = model_meta._default_dict
callables = model_meta._default_callables
for row_dict in self._rows:
field_row = defaults.copy()
seen = set()
for key in row_dict:
validate_field(key)
if key in model_meta.fields:
field = model_meta.fields[key]
else:
field = key
field_row[field] = row_dict[key]
seen.add(field)
if callables:
for field in callables:
if field not in seen:
field_row[field] = callables[field]()
yield field_row
def _clone_attributes(self, query):
query = super(InsertQuery, self)._clone_attributes(query)
query._rows = self._rows
query._upsert = self._upsert
query._is_multi_row_insert = self._is_multi_row_insert
query._fields = self._fields
query._query = self._query
return query
join = not_allowed('joining')
where = not_allowed('where clause')
@returns_clone
def upsert(self, upsert=True):
self._upsert = upsert
def sql(self):
return self.compiler().generate_insert(self)
def execute(self):
if self._is_multi_row_insert and self._query is None:
if not self.database.insert_many:
last_id = None
for row in self._rows:
last_id = InsertQuery(self.model_class, row).execute()
return last_id
return self.database.last_insert_id(self._execute(), self.model_class)
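# InsertQuery sketch for a hypothetical `User` model: a single-row insert
# carries one field dict, a multi-row insert an iterable of dicts (or an
# INSERT ... SELECT when `query` is given):
#
#     InsertQuery(User, {User.username: 'charlie'}).execute()
#     InsertQuery(User, rows=[{'username': 'a'}, {'username': 'b'}]).execute()
#
# When the database lacks insert_many support, the multi-row form falls back
# to one INSERT per row and returns the last insert id.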
class DeleteQuery(Query):
join = not_allowed('joining')
def sql(self):
return self.compiler().generate_delete(self)
def execute(self):
return self.database.rows_affected(self._execute())
IndexMetadata = namedtuple(
'IndexMetadata',
('name', 'sql', 'columns', 'unique', 'table'))
ColumnMetadata = namedtuple(
'ColumnMetadata',
('name', 'data_type', 'null', 'primary_key', 'table'))
ForeignKeyMetadata = namedtuple(
'ForeignKeyMetadata',
('column', 'dest_table', 'dest_column', 'table'))
class PeeweeException(Exception): pass
class ImproperlyConfigured(PeeweeException): pass
class DatabaseError(PeeweeException): pass
class DataError(DatabaseError): pass
class IntegrityError(DatabaseError): pass
class InterfaceError(PeeweeException): pass
class InternalError(DatabaseError): pass
class NotSupportedError(DatabaseError): pass
class OperationalError(DatabaseError): pass
class ProgrammingError(DatabaseError): pass
class ExceptionWrapper(object):
__slots__ = ['exceptions']
def __init__(self, exceptions):
self.exceptions = exceptions
def __enter__(self): pass
def __exit__(self, exc_type, exc_value, traceback):
if exc_type is None:
return
if exc_type.__name__ in self.exceptions:
new_type = self.exceptions[exc_type.__name__]
reraise(new_type, new_type(*exc_value.args), traceback)
class _BaseConnectionLocal(object):
def __init__(self, **kwargs):
super(_BaseConnectionLocal, self).__init__(**kwargs)
self.autocommit = None
self.closed = True
self.conn = None
self.context_stack = []
self.transactions = []
class _ConnectionLocal(_BaseConnectionLocal, threading.local):
pass
class Database(object):
commit_select = False
compiler_class = QueryCompiler
compound_operations = ['UNION', 'INTERSECT', 'EXCEPT', 'UNION ALL']
distinct_on = False
drop_cascade = False
field_overrides = {}
foreign_keys = True
for_update = False
for_update_nowait = False
insert_many = True
interpolation = '?'
limit_max = None
op_overrides = {}
quote_char = '"'
reserved_tables = []
savepoints = True
sequences = False
subquery_delete_same_table = True
window_functions = False
exceptions = {
'ConstraintError': IntegrityError,
'DatabaseError': DatabaseError,
'DataError': DataError,
'IntegrityError': IntegrityError,
'InterfaceError': InterfaceError,
'InternalError': InternalError,
'NotSupportedError': NotSupportedError,
'OperationalError': OperationalError,
'ProgrammingError': ProgrammingError}
def __init__(self, database, threadlocals=True, autocommit=True,
fields=None, ops=None, autorollback=False, **connect_kwargs):
self.init(database, **connect_kwargs)
if threadlocals:
self.__local = _ConnectionLocal()
else:
self.__local = _BaseConnectionLocal()
self._conn_lock = threading.Lock()
self.autocommit = autocommit
self.autorollback = autorollback
self.field_overrides = merge_dict(self.field_overrides, fields or {})
self.op_overrides = merge_dict(self.op_overrides, ops or {})
def init(self, database, **connect_kwargs):
self.deferred = database is None
self.database = database
self.connect_kwargs = connect_kwargs
def exception_wrapper(self):
return ExceptionWrapper(self.exceptions)
def connect(self):
with self._conn_lock:
if self.deferred:
raise Exception('Error, database not properly initialized '
'before opening connection')
with self.exception_wrapper():
self.__local.conn = self._connect(
self.database,
**self.connect_kwargs)
self.__local.closed = False
def close(self):
with self._conn_lock:
if self.deferred:
raise Exception('Error, database not properly initialized '
'before closing connection')
with self.exception_wrapper():
self._close(self.__local.conn)
self.__local.closed = True
def get_conn(self):
if self.__local.context_stack:
return self.__local.context_stack[-1].connection
if self.__local.closed:
self.connect()
return self.__local.conn
def is_closed(self):
return self.__local.closed
def get_cursor(self):
return self.get_conn().cursor()
def _close(self, conn):
conn.close()
def _connect(self, database, **kwargs):
raise NotImplementedError
@classmethod
def register_fields(cls, fields):
cls.field_overrides = merge_dict(cls.field_overrides, fields)
@classmethod
def register_ops(cls, ops):
cls.op_overrides = merge_dict(cls.op_overrides, ops)
def last_insert_id(self, cursor, model):
if model._meta.auto_increment:
return cursor.lastrowid
def rows_affected(self, cursor):
return cursor.rowcount
def sql_error_handler(self, exception, sql, params, require_commit):
return True
def compiler(self):
return self.compiler_class(
self.quote_char, self.interpolation, self.field_overrides,
self.op_overrides)
def execute_sql(self, sql, params=None, require_commit=True):
logger.debug((sql, params))
with self.exception_wrapper():
cursor = self.get_cursor()
try:
cursor.execute(sql, params or ())
except Exception as exc:
if self.get_autocommit() and self.autorollback:
self.rollback()
if self.sql_error_handler(exc, sql, params, require_commit):
raise
else:
if require_commit and self.get_autocommit():
self.commit()
return cursor
def begin(self):
pass
def commit(self):
self.get_conn().commit()
def rollback(self):
self.get_conn().rollback()
def set_autocommit(self, autocommit):
self.__local.autocommit = autocommit
def get_autocommit(self):
if self.__local.autocommit is None:
self.set_autocommit(self.autocommit)
return self.__local.autocommit
def push_execution_context(self, transaction):
self.__local.context_stack.append(transaction)
def pop_execution_context(self):
self.__local.context_stack.pop()
def execution_context_depth(self):
return len(self.__local.context_stack)
def execution_context(self, with_transaction=True):
return ExecutionContext(self, with_transaction=with_transaction)
def push_transaction(self, transaction):
self.__local.transactions.append(transaction)
def pop_transaction(self):
self.__local.transactions.pop()
def transaction_depth(self):
return len(self.__local.transactions)
def transaction(self):
return transaction(self)
def commit_on_success(self, func):
@wraps(func)
def inner(*args, **kwargs):
with self.transaction():
return func(*args, **kwargs)
return inner
def savepoint(self, sid=None):
if not self.savepoints:
raise NotImplementedError
return savepoint(self, sid)
def atomic(self):
return _atomic(self)
def get_tables(self, schema=None):
raise NotImplementedError
def get_indexes(self, table, schema=None):
raise NotImplementedError
def get_columns(self, table, schema=None):
raise NotImplementedError
def get_primary_keys(self, table, schema=None):
raise NotImplementedError
def get_foreign_keys(self, table, schema=None):
raise NotImplementedError
def sequence_exists(self, seq):
raise NotImplementedError
def create_table(self, model_class, safe=False):
qc = self.compiler()
return self.execute_sql(*qc.create_table(model_class, safe))
def create_tables(self, models, safe=False):
create_model_tables(models, fail_silently=safe)
def create_index(self, model_class, fields, unique=False):
qc = self.compiler()
if not isinstance(fields, (list, tuple)):
raise ValueError('Fields passed to "create_index" must be a list '
'or tuple: "%s"' % fields)
fobjs = [
model_class._meta.fields[f] if isinstance(f, basestring) else f
for f in fields]
return self.execute_sql(*qc.create_index(model_class, fobjs, unique))
def create_foreign_key(self, model_class, field, constraint=None):
qc = self.compiler()
return self.execute_sql(*qc.create_foreign_key(
model_class, field, constraint))
def create_sequence(self, seq):
if self.sequences:
qc = self.compiler()
return self.execute_sql(*qc.create_sequence(seq))
def drop_table(self, model_class, fail_silently=False, cascade=False):
qc = self.compiler()
return self.execute_sql(*qc.drop_table(
model_class, fail_silently, cascade))
def drop_tables(self, models, safe=False, cascade=False):
drop_model_tables(models, fail_silently=safe, cascade=cascade)
def drop_sequence(self, seq):
if self.sequences:
qc = self.compiler()
return self.execute_sql(*qc.drop_sequence(seq))
def extract_date(self, date_part, date_field):
return fn.EXTRACT(Clause(date_part, R('FROM'), date_field))
def truncate_date(self, date_part, date_field):
return fn.DATE_TRUNC(SQL(date_part), date_field)
class SqliteDatabase(Database):
foreign_keys = False
insert_many = sqlite3 and sqlite3.sqlite_version_info >= (3, 7, 11, 0)
limit_max = -1
op_overrides = {
OP_LIKE: 'GLOB',
OP_ILIKE: 'LIKE',
}
def __init__(self, *args, **kwargs):
self._journal_mode = kwargs.pop('journal_mode', None)
super(SqliteDatabase, self).__init__(*args, **kwargs)
if not self.database:
self.database = ':memory:'
def _connect(self, database, **kwargs):
conn = sqlite3.connect(database, **kwargs)
conn.isolation_level = None
self._add_conn_hooks(conn)
return conn
def _add_conn_hooks(self, conn):
conn.create_function('date_part', 2, _sqlite_date_part)
conn.create_function('date_trunc', 2, _sqlite_date_trunc)
conn.create_function('regexp', 2, _sqlite_regexp)
if self._journal_mode:
self.execute_sql('PRAGMA journal_mode=%s;' % self._journal_mode)
def begin(self, lock_type='DEFERRED'):
self.execute_sql('BEGIN %s' % lock_type, require_commit=False)
def get_tables(self, schema=None):
cursor = self.execute_sql('SELECT name FROM sqlite_master WHERE '
'type = ? ORDER BY name;', ('table',))
return [row[0] for row in cursor.fetchall()]
def get_indexes(self, table, schema=None):
query = ('SELECT name, sql FROM sqlite_master '
'WHERE tbl_name = ? AND type = ? ORDER BY name')
cursor = self.execute_sql(query, (table, 'index'))
index_to_sql = dict(cursor.fetchall())
# Determine which indexes have a unique constraint.
unique_indexes = set()
cursor = self.execute_sql('PRAGMA index_list("%s")' % table)
for _, name, is_unique in cursor.fetchall():
if is_unique:
unique_indexes.add(name)
# Retrieve the indexed columns.
index_columns = {}
for index_name in sorted(index_to_sql):
cursor = self.execute_sql('PRAGMA index_info("%s")' % index_name)
index_columns[index_name] = [row[2] for row in cursor.fetchall()]
return [
IndexMetadata(
name,
index_to_sql[name],
index_columns[name],
name in unique_indexes,
table)
for name in sorted(index_to_sql)]
def get_columns(self, table, schema=None):
cursor = self.execute_sql('PRAGMA table_info("%s")' % table)
return [ColumnMetadata(row[1], row[2], not row[3], bool(row[5]), table)
for row in cursor.fetchall()]
def get_primary_keys(self, table, schema=None):
cursor = self.execute_sql('PRAGMA table_info("%s")' % table)
return [row[1] for row in cursor.fetchall() if row[-1]]
def get_foreign_keys(self, table, schema=None):
cursor = self.execute_sql('PRAGMA foreign_key_list("%s")' % table)
return [ForeignKeyMetadata(row[3], row[2], row[4], table)
for row in cursor.fetchall()]
def savepoint(self, sid=None):
return savepoint_sqlite(self, sid)
def extract_date(self, date_part, date_field):
return fn.date_part(date_part, date_field)
def truncate_date(self, date_part, date_field):
return fn.strftime(SQLITE_DATE_TRUNC_MAPPING[date_part], date_field)
class PostgresqlDatabase(Database):
commit_select = True
distinct_on = True
drop_cascade = True
field_overrides = {
'blob': 'BYTEA',
'bool': 'BOOLEAN',
'datetime': 'TIMESTAMP',
'decimal': 'NUMERIC',
'double': 'DOUBLE PRECISION',
'primary_key': 'SERIAL',
'uuid': 'UUID',
}
for_update = True
for_update_nowait = True
interpolation = '%s'
op_overrides = {
OP_REGEXP: '~',
}
reserved_tables = ['user']
sequences = True
window_functions = True
register_unicode = True
def _connect(self, database, **kwargs):
if not psycopg2:
raise ImproperlyConfigured('psycopg2 must be installed.')
conn = psycopg2.connect(database=database, **kwargs)
if self.register_unicode:
pg_extensions.register_type(pg_extensions.UNICODE, conn)
pg_extensions.register_type(pg_extensions.UNICODEARRAY, conn)
return conn
def last_insert_id(self, cursor, model):
meta = model._meta
schema = ''
if meta.schema:
schema = '%s.' % meta.schema
if meta.primary_key.sequence:
seq = meta.primary_key.sequence
elif meta.auto_increment:
seq = '%s_%s_seq' % (meta.db_table, meta.primary_key.db_column)
else:
seq = None
if seq:
cursor.execute("SELECT CURRVAL('%s\"%s\"')" % (schema, seq))
result = cursor.fetchone()[0]
if self.get_autocommit():
self.commit()
return result
def get_tables(self, schema='public'):
query = ('SELECT tablename FROM pg_catalog.pg_tables '
'WHERE schemaname = %s ORDER BY tablename')
return [r for r, in self.execute_sql(query, (schema,)).fetchall()]
def get_indexes(self, table, schema='public'):
query = """
SELECT
i.relname, idxs.indexdef, idx.indisunique,
array_to_string(array_agg(cols.attname), ',')
FROM pg_catalog.pg_class AS t
INNER JOIN pg_catalog.pg_index AS idx ON t.oid = idx.indrelid
INNER JOIN pg_catalog.pg_class AS i ON idx.indexrelid = i.oid
INNER JOIN pg_catalog.pg_indexes AS idxs ON
(idxs.tablename = t.relname AND idxs.indexname = i.relname)
LEFT OUTER JOIN pg_catalog.pg_attribute AS cols ON
(cols.attrelid = t.oid AND cols.attnum = ANY(idx.indkey))
WHERE t.relname = %s AND t.relkind = %s AND idxs.schemaname = %s
GROUP BY i.relname, idxs.indexdef, idx.indisunique
ORDER BY idx.indisunique DESC, i.relname;"""
cursor = self.execute_sql(query, (table, 'r', schema))
return [IndexMetadata(row[0], row[1], row[3].split(','), row[2], table)
for row in cursor.fetchall()]
def get_columns(self, table, schema='public'):
query = """
SELECT column_name, is_nullable, data_type
FROM information_schema.columns
WHERE table_name = %s AND table_schema = %s"""
cursor = self.execute_sql(query, (table, schema))
pks = set(self.get_primary_keys(table, schema))
return [ColumnMetadata(name, dt, null == 'YES', name in pks, table)
for name, null, dt in cursor.fetchall()]
def get_primary_keys(self, table, schema='public'):
query = """
SELECT kc.column_name
FROM information_schema.table_constraints AS tc
INNER JOIN information_schema.key_column_usage AS kc ON (
tc.table_name = kc.table_name AND
tc.table_schema = kc.table_schema AND
tc.constraint_name = kc.constraint_name)
WHERE
tc.constraint_type = %s AND
tc.table_name = %s AND
tc.table_schema = %s"""
cursor = self.execute_sql(query, ('PRIMARY KEY', table, schema))
return [row for row, in cursor.fetchall()]
def get_foreign_keys(self, table, schema='public'):
sql = """
SELECT
kcu.column_name, ccu.table_name, ccu.column_name
FROM information_schema.table_constraints AS tc
JOIN information_schema.key_column_usage AS kcu
ON (tc.constraint_name = kcu.constraint_name AND
tc.constraint_schema = kcu.constraint_schema)
JOIN information_schema.constraint_column_usage AS ccu
ON (ccu.constraint_name = tc.constraint_name AND
ccu.constraint_schema = tc.constraint_schema)
WHERE
tc.constraint_type = 'FOREIGN KEY' AND
tc.table_name = %s AND
tc.table_schema = %s"""
cursor = self.execute_sql(sql, (table, schema))
return [ForeignKeyMetadata(row[0], row[1], row[2], table)
for row in cursor.fetchall()]
def sequence_exists(self, sequence):
res = self.execute_sql("""
SELECT COUNT(*) FROM pg_class, pg_namespace
WHERE relkind='S'
AND pg_class.relnamespace = pg_namespace.oid
AND relname=%s""", (sequence,))
return bool(res.fetchone()[0])
def set_search_path(self, *search_path):
path_params = ','.join(['%s'] * len(search_path))
self.execute_sql('SET search_path TO %s' % path_params, search_path)
class MySQLDatabase(Database):
commit_select = True
compound_operations = ['UNION', 'UNION ALL']
field_overrides = {
'bool': 'BOOL',
'decimal': 'NUMERIC',
'double': 'DOUBLE PRECISION',
'float': 'FLOAT',
'primary_key': 'INTEGER AUTO_INCREMENT',
'text': 'LONGTEXT',
}
for_update = True
interpolation = '%s'
limit_max = 2 ** 64 - 1 # MySQL quirk
op_overrides = {
OP_LIKE: 'LIKE BINARY',
OP_ILIKE: 'LIKE',
OP_XOR: 'XOR',
}
quote_char = '`'
subquery_delete_same_table = False
def _connect(self, database, **kwargs):
if not mysql:
raise ImproperlyConfigured('MySQLdb or PyMySQL must be installed.')
conn_kwargs = {
'charset': 'utf8',
'use_unicode': True,
}
conn_kwargs.update(kwargs)
if 'password' in conn_kwargs:
conn_kwargs['passwd'] = conn_kwargs.pop('password')
return mysql.connect(db=database, **conn_kwargs)
def get_tables(self, schema=None):
return [row for row, in self.execute_sql('SHOW TABLES')]
def get_indexes(self, table, schema=None):
cursor = self.execute_sql('SHOW INDEX FROM `%s`' % table)
unique = set()
indexes = {}
for row in cursor.fetchall():
if not row[1]:
unique.add(row[2])
indexes.setdefault(row[2], [])
indexes[row[2]].append(row[4])
return [IndexMetadata(name, None, indexes[name], name in unique, table)
for name in indexes]
def get_columns(self, table, schema=None):
sql = """
SELECT column_name, is_nullable, data_type
FROM information_schema.columns
WHERE table_name = %s AND table_schema = DATABASE()"""
cursor = self.execute_sql(sql, (table,))
pks = set(self.get_primary_keys(table))
return [ColumnMetadata(name, dt, null == 'YES', name in pks, table)
for name, null, dt in cursor.fetchall()]
def get_primary_keys(self, table, schema=None):
cursor = self.execute_sql('SHOW INDEX FROM `%s`' % table)
return [row[4] for row in cursor.fetchall() if row[2] == 'PRIMARY']
def get_foreign_keys(self, table, schema=None):
query = """
SELECT column_name, referenced_table_name, referenced_column_name
FROM information_schema.key_column_usage
WHERE table_name = %s
AND table_schema = DATABASE()
AND referenced_table_name IS NOT NULL
AND referenced_column_name IS NOT NULL"""
cursor = self.execute_sql(query, (table,))
return [
ForeignKeyMetadata(column, dest_table, dest_column, table)
for column, dest_table, dest_column in cursor.fetchall()]
def extract_date(self, date_part, date_field):
return fn.EXTRACT(Clause(R(date_part), R('FROM'), date_field))
def truncate_date(self, date_part, date_field):
return fn.DATE_FORMAT(date_field, MYSQL_DATE_TRUNC_MAPPING[date_part])
class _callable_context_manager(object):
def __call__(self, fn):
@wraps(fn)
def inner(*args, **kwargs):
with self:
return fn(*args, **kwargs)
return inner
class ExecutionContext(_callable_context_manager):
def __init__(self, database, with_transaction=True):
self.database = database
self.with_transaction = with_transaction
def __enter__(self):
with self.database._conn_lock:
self.database.push_execution_context(self)
self.connection = self.database._connect(
self.database.database,
**self.database.connect_kwargs)
if self.with_transaction:
self.txn = self.database.transaction()
self.txn.__enter__()
return self
def __exit__(self, exc_type, exc_val, exc_tb):
with self.database._conn_lock:
try:
if self.with_transaction:
if not exc_type:
self.txn.commit(False)
self.txn.__exit__(exc_type, exc_val, exc_tb)
finally:
self.database.pop_execution_context()
self.database._close(self.connection)
class _atomic(_callable_context_manager):
def __init__(self, db):
self.db = db
def __enter__(self):
if self.db.transaction_depth() == 0:
self._helper = self.db.transaction()
else:
self._helper = self.db.savepoint()
return self._helper.__enter__()
def __exit__(self, exc_type, exc_val, exc_tb):
return self._helper.__exit__(exc_type, exc_val, exc_tb)
class transaction(_callable_context_manager):
def __init__(self, db):
self.db = db
def _begin(self):
self.db.begin()
def commit(self, begin=True):
self.db.commit()
if begin:
self._begin()
def rollback(self, begin=True):
self.db.rollback()
if begin:
self._begin()
def __enter__(self):
self._orig = self.db.get_autocommit()
self.db.set_autocommit(False)
if self.db.transaction_depth() == 0:
self._begin()
self.db.push_transaction(self)
return self
def __exit__(self, exc_type, exc_val, exc_tb):
try:
if exc_type:
self.rollback(False)
elif self.db.transaction_depth() == 1:
try:
self.commit(False)
except:
self.rollback(False)
raise
finally:
self.db.set_autocommit(self._orig)
self.db.pop_transaction()
class savepoint(_callable_context_manager):
def __init__(self, db, sid=None):
self.db = db
_compiler = db.compiler()
self.sid = sid or 's' + uuid.uuid4().hex
self.quoted_sid = _compiler.quote(self.sid)
def _execute(self, query):
self.db.execute_sql(query, require_commit=False)
def commit(self):
self._execute('RELEASE SAVEPOINT %s;' % self.quoted_sid)
def rollback(self):
self._execute('ROLLBACK TO SAVEPOINT %s;' % self.quoted_sid)
def __enter__(self):
self._orig_autocommit = self.db.get_autocommit()
self.db.set_autocommit(False)
self._execute('SAVEPOINT %s;' % self.quoted_sid)
return self
def __exit__(self, exc_type, exc_val, exc_tb):
try:
if exc_type:
self.rollback()
else:
try:
self.commit()
except:
self.rollback()
raise
finally:
self.db.set_autocommit(self._orig_autocommit)
class savepoint_sqlite(savepoint):
def __enter__(self):
conn = self.db.get_conn()
# For sqlite, the connection's isolation_level *must* be set to None.
# The act of setting it, though, will break any existing savepoints,
# so only write to it if necessary.
if conn.isolation_level is not None:
self._orig_isolation_level = conn.isolation_level
conn.isolation_level = None
else:
self._orig_isolation_level = None
return super(savepoint_sqlite, self).__enter__()
def __exit__(self, exc_type, exc_val, exc_tb):
try:
return super(savepoint_sqlite, self).__exit__(
exc_type, exc_val, exc_tb)
finally:
if self._orig_isolation_level is not None:
self.db.get_conn().isolation_level = self._orig_isolation_level
class FieldProxy(Field):
def __init__(self, alias, field_instance):
self._model_alias = alias
self.model = self._model_alias.model_class
self.field_instance = field_instance
def clone_base(self):
return FieldProxy(self._model_alias, self.field_instance)
def coerce(self, value):
return self.field_instance.coerce(value)
def python_value(self, value):
return self.field_instance.python_value(value)
def db_value(self, value):
return self.field_instance.db_value(value)
def __getattr__(self, attr):
if attr == 'model_class':
return self._model_alias
return getattr(self.field_instance, attr)
class ModelAlias(object):
def __init__(self, model_class):
self.__dict__['model_class'] = model_class
def __getattr__(self, attr):
model_attr = getattr(self.model_class, attr)
if isinstance(model_attr, Field):
return FieldProxy(self, model_attr)
return model_attr
def __setattr__(self, attr, value):
raise AttributeError('Cannot set attributes on ModelAlias instances')
def get_proxy_fields(self):
return [
FieldProxy(self, f) for f in self.model_class._meta.get_fields()]
def select(self, *selection):
query = SelectQuery(self, *selection)
if self._meta.order_by:
query = query.order_by(*self._meta.order_by)
return query
class DoesNotExist(Exception): pass
if sqlite3:
default_database = SqliteDatabase('peewee.db')
else:
default_database = None
class ModelOptions(object):
def __init__(self, cls, database=None, db_table=None, indexes=None,
order_by=None, primary_key=None, table_alias=None,
constraints=None, schema=None, validate_backrefs=True,
**kwargs):
self.model_class = cls
self.name = cls.__name__.lower()
self.fields = {}
self.columns = {}
self.defaults = {}
self._default_by_name = {}
self._default_dict = {}
self._default_callables = {}
self.database = database or default_database
self.db_table = db_table
self.indexes = list(indexes or [])
self.order_by = order_by
self.primary_key = primary_key
self.table_alias = table_alias
self.constraints = constraints
self.schema = schema
self.validate_backrefs = validate_backrefs
self.auto_increment = None
self.rel = {}
self.reverse_rel = {}
for key, value in kwargs.items():
setattr(self, key, value)
self._additional_keys = set(kwargs.keys())
def prepared(self):
for field in self.fields.values():
if field.default is not None:
self.defaults[field] = field.default
if callable(field.default):
self._default_callables[field] = field.default
else:
self._default_dict[field] = field.default
self._default_by_name[field.name] = field.default
if self.order_by:
norm_order_by = []
for item in self.order_by:
if isinstance(item, Field):
prefix = '-' if item._ordering == 'DESC' else ''
item = prefix + item.name
field = self.fields[item.lstrip('-')]
if item.startswith('-'):
norm_order_by.append(field.desc())
else:
norm_order_by.append(field.asc())
self.order_by = norm_order_by
def get_default_dict(self):
dd = self._default_by_name.copy()
if self._default_callables:
for field, default in self._default_callables.items():
dd[field.name] = default()
return dd
def get_sorted_fields(self):
key = lambda i: i[1]._sort_key
return sorted(self.fields.items(), key=key)
def get_field_names(self):
return [f[0] for f in self.get_sorted_fields()]
def get_fields(self):
return [f[1] for f in self.get_sorted_fields()]
def get_field_index(self, field):
for i, (field_name, field_obj) in enumerate(self.get_sorted_fields()):
if field_name == field.name:
return i
return -1
def rel_for_model(self, model, field_obj=None):
is_field = isinstance(field_obj, Field)
is_node = not is_field and isinstance(field_obj, Node)
for field in self.get_fields():
if isinstance(field, ForeignKeyField) and field.rel_model == model:
is_match = any((
field_obj is None,
is_field and field_obj.name == field.name,
is_node and field_obj._alias == field.name))
if is_match:
return field
def reverse_rel_for_model(self, model, field_obj=None):
return model._meta.rel_for_model(self.model_class, field_obj)
def rel_exists(self, model):
return self.rel_for_model(model) or self.reverse_rel_for_model(model)
def related_models(self, backrefs=False):
models = []
stack = [self.model_class]
while stack:
model = stack.pop()
if model in models:
continue
models.append(model)
for fk in model._meta.rel.values():
stack.append(fk.rel_model)
if backrefs:
for fk in model._meta.reverse_rel.values():
stack.append(fk.model_class)
return models
class BaseModel(type):
inheritable = set(['constraints', 'database', 'indexes', 'order_by',
'primary_key', 'schema', 'validate_backrefs'])
def __new__(cls, name, bases, attrs):
if not bases:
return super(BaseModel, cls).__new__(cls, name, bases, attrs)
meta_options = {}
meta = attrs.pop('Meta', None)
if meta:
for k, v in meta.__dict__.items():
if not k.startswith('_'):
meta_options[k] = v
model_pk = getattr(meta, 'primary_key', None)
parent_pk = None
# inherit any field descriptors by deep copying the underlying field
# into the attrs of the new model, additionally see if the bases define
# inheritable model options and swipe them
for b in bases:
if not hasattr(b, '_meta'):
continue
base_meta = getattr(b, '_meta')
if parent_pk is None:
parent_pk = deepcopy(base_meta.primary_key)
all_inheritable = cls.inheritable | base_meta._additional_keys
for (k, v) in base_meta.__dict__.items():
if k in all_inheritable and k not in meta_options:
meta_options[k] = v
for (k, v) in b.__dict__.items():
if k in attrs:
continue
if isinstance(v, FieldDescriptor):
if not v.field.primary_key:
attrs[k] = deepcopy(v.field)
# initialize the new class and set the magic attributes
cls = super(BaseModel, cls).__new__(cls, name, bases, attrs)
cls._meta = ModelOptions(cls, **meta_options)
cls._data = None
cls._meta.indexes = list(cls._meta.indexes)
if not cls._meta.db_table:
cls._meta.db_table = re.sub('[^\w]+', '_', cls.__name__.lower())
# replace fields with field descriptors, calling the add_to_class hook
fields = []
for name, attr in cls.__dict__.items():
if isinstance(attr, Field):
if attr.primary_key and model_pk:
raise ValueError('primary key is overdetermined.')
elif attr.primary_key:
model_pk, pk_name = attr, name
else:
fields.append((attr, name))
if model_pk is None:
if parent_pk:
model_pk, pk_name = parent_pk, parent_pk.name
else:
model_pk, pk_name = PrimaryKeyField(primary_key=True), 'id'
elif isinstance(model_pk, CompositeKey):
pk_name = '_composite_key'
if model_pk is not False:
model_pk.add_to_class(cls, pk_name)
cls._meta.primary_key = model_pk
cls._meta.auto_increment = (
isinstance(model_pk, PrimaryKeyField) or
bool(model_pk.sequence))
for field, name in fields:
field.add_to_class(cls, name)
# create a repr and error class before finalizing
if hasattr(cls, '__unicode__'):
setattr(cls, '__repr__', lambda self: '<%s: %r>' % (
cls.__name__, self.__unicode__()))
exc_name = '%sDoesNotExist' % cls.__name__
exception_class = type(exc_name, (DoesNotExist,), {})
cls.DoesNotExist = exception_class
cls._meta.prepared()
return cls
def __iter__(self):
return iter(self.select())
class Model(with_metaclass(BaseModel)):
def __init__(self, *args, **kwargs):
self._data = self._meta.get_default_dict()
self._dirty = set()
self._obj_cache = {}
for k, v in kwargs.items():
setattr(self, k, v)
@classmethod
def alias(cls):
return ModelAlias(cls)
@classmethod
def select(cls, *selection):
query = SelectQuery(cls, *selection)
if cls._meta.order_by:
query = query.order_by(*cls._meta.order_by)
return query
@classmethod
def update(cls, **update):
fdict = dict((cls._meta.fields[f], v) for f, v in update.items())
return UpdateQuery(cls, fdict)
@classmethod
def insert(cls, **insert):
return InsertQuery(cls, insert)
@classmethod
def insert_many(cls, rows):
return InsertQuery(cls, rows=rows)
@classmethod
def insert_from(cls, fields, query):
return InsertQuery(cls, fields=fields, query=query)
@classmethod
def delete(cls):
return DeleteQuery(cls)
@classmethod
def raw(cls, sql, *params):
return RawQuery(cls, sql, *params)
@classmethod
def create(cls, **query):
inst = cls(**query)
inst.save(force_insert=True)
inst._prepare_instance()
return inst
@classmethod
def get(cls, *query, **kwargs):
sq = cls.select().naive()
if query:
sq = sq.where(*query)
if kwargs:
sq = sq.filter(**kwargs)
return sq.get()
@classmethod
def get_or_create(cls, **kwargs):
sq = cls.select().filter(**kwargs)
try:
return sq.get()
except cls.DoesNotExist:
return cls.create(**kwargs)
@classmethod
def filter(cls, *dq, **query):
return cls.select().filter(*dq, **query)
@classmethod
def table_exists(cls):
kwargs = {}
if cls._meta.schema:
kwargs['schema'] = cls._meta.schema
return cls._meta.db_table in cls._meta.database.get_tables(**kwargs)
@classmethod
def create_table(cls, fail_silently=False):
if fail_silently and cls.table_exists():
return
db = cls._meta.database
pk = cls._meta.primary_key
if db.sequences and pk.sequence:
if not db.sequence_exists(pk.sequence):
db.create_sequence(pk.sequence)
db.create_table(cls)
cls._create_indexes()
@classmethod
def _fields_to_index(cls):
fields = []
for field in cls._meta.fields.values():
if field.primary_key:
continue
requires_index = any((
field.index,
field.unique,
isinstance(field, ForeignKeyField)))
if requires_index:
fields.append(field)
return fields
@classmethod
def _create_indexes(cls):
db = cls._meta.database
for field in cls._fields_to_index():
db.create_index(cls, [field], field.unique)
if cls._meta.indexes:
for fields, unique in cls._meta.indexes:
db.create_index(cls, fields, unique)
@classmethod
def sqlall(cls):
queries = []
compiler = cls._meta.database.compiler()
pk = cls._meta.primary_key
if cls._meta.database.sequences and pk.sequence:
queries.append(compiler.create_sequence(pk.sequence))
queries.append(compiler.create_table(cls))
for field in cls._fields_to_index():
queries.append(compiler.create_index(cls, [field], field.unique))
if cls._meta.indexes:
for field_names, unique in cls._meta.indexes:
fields = [cls._meta.fields[f] for f in field_names]
queries.append(compiler.create_index(cls, fields, unique))
return [sql for sql, _ in queries]
@classmethod
def drop_table(cls, fail_silently=False, cascade=False):
cls._meta.database.drop_table(cls, fail_silently, cascade)
@classmethod
def _as_entity(cls):
if cls._meta.schema:
return Entity(cls._meta.schema, cls._meta.db_table)
return Entity(cls._meta.db_table)
def _get_pk_value(self):
return getattr(self, self._meta.primary_key.name)
get_id = _get_pk_value # Backwards-compatibility.
def _set_pk_value(self, value):
setattr(self, self._meta.primary_key.name, value)
set_id = _set_pk_value # Backwards-compatibility.
def _pk_expr(self):
return self._meta.primary_key == self._get_pk_value()
def _prepare_instance(self):
self._dirty.clear()
self.prepared()
def prepared(self):
pass
def _prune_fields(self, field_dict, only):
new_data = {}
for field in only:
if field.name in field_dict:
new_data[field.name] = field_dict[field.name]
return new_data
def save(self, force_insert=False, only=None):
field_dict = dict(self._data)
pk_field = self._meta.primary_key
if only:
field_dict = self._prune_fields(field_dict, only)
if self._get_pk_value() is not None and not force_insert:
if isinstance(pk_field, CompositeKey):
for pk_part_name in pk_field.field_names:
field_dict.pop(pk_part_name, None)
else:
field_dict.pop(pk_field.name, None)
rows = self.update(**field_dict).where(self._pk_expr()).execute()
else:
pk = self._get_pk_value()
pk_from_cursor = self.insert(**field_dict).execute()
if pk_from_cursor is not None:
pk = pk_from_cursor
self._set_pk_value(pk) # Do not overwrite current ID with None.
rows = 1
self._dirty.clear()
return rows
def is_dirty(self):
return bool(self._dirty)
@property
def dirty_fields(self):
return [f for f in self._meta.get_fields() if f.name in self._dirty]
def dependencies(self, search_nullable=False):
model_class = type(self)
query = self.select().where(self._pk_expr())
stack = [(type(self), query)]
seen = set()
while stack:
klass, query = stack.pop()
if klass in seen:
continue
seen.add(klass)
for rel_name, fk in klass._meta.reverse_rel.items():
rel_model = fk.model_class
if fk.rel_model is model_class:
node = (fk == self._data[fk.to_field.name])
subquery = rel_model.select().where(node)
else:
node = fk << query
subquery = rel_model.select().where(node)
if not fk.null or search_nullable:
stack.append((rel_model, subquery))
yield (node, fk)
def delete_instance(self, recursive=False, delete_nullable=False):
if recursive:
dependencies = self.dependencies(delete_nullable)
for query, fk in reversed(list(dependencies)):
model = fk.model_class
if fk.null and not delete_nullable:
model.update(**{fk.name: None}).where(query).execute()
else:
model.delete().where(query).execute()
return self.delete().where(self._pk_expr()).execute()
def __eq__(self, other):
return (
other.__class__ == self.__class__ and
self._get_pk_value() is not None and
other._get_pk_value() == self._get_pk_value())
def __ne__(self, other):
return not self == other
def prefetch_add_subquery(sq, subqueries):
fixed_queries = [PrefetchResult(sq)]
for i, subquery in enumerate(subqueries):
if not isinstance(subquery, Query) and issubclass(subquery, Model):
subquery = subquery.select()
subquery_model = subquery.model_class
fkf = backref = None
for j in reversed(range(i + 1)):
last_query = fixed_queries[j][0]
last_model = last_query.model_class
fkf = subquery_model._meta.rel_for_model(last_model)
backref = last_model._meta.rel_for_model(subquery_model)
if fkf or backref:
break
if not (fkf or backref):
raise AttributeError('Error: unable to find foreign key for '
'query: %s' % subquery)
if fkf:
inner_query = last_query.select(fkf.to_field)
fixed_queries.append(
PrefetchResult(subquery.where(fkf << inner_query), fkf, False))
elif backref:
q = subquery.where(backref.to_field << last_query.select(backref))
fixed_queries.append(PrefetchResult(q, backref, True))
return fixed_queries
__prefetched = namedtuple('__prefetched', (
'query', 'field', 'backref', 'rel_model', 'foreign_key_attr', 'model'))
class PrefetchResult(__prefetched):
def __new__(cls, query, field=None, backref=None, rel_model=None,
foreign_key_attr=None, model=None):
if field:
if backref:
rel_model = field.model_class
foreign_key_attr = field.to_field.name
else:
rel_model = field.rel_model
foreign_key_attr = field.name
model = query.model_class
return super(PrefetchResult, cls).__new__(
cls, query, field, backref, rel_model, foreign_key_attr, model)
def populate_instance(self, instance, id_map):
if self.backref:
identifier = instance._data[self.field.name]
if identifier in id_map:
setattr(instance, self.field.name, id_map[identifier])
else:
identifier = instance._data[self.field.to_field.name]
rel_instances = id_map.get(identifier, [])
attname = self.foreign_key_attr
dest = '%s_prefetch' % self.field.related_name
for inst in rel_instances:
setattr(inst, attname, instance)
setattr(instance, dest, rel_instances)
def store_instance(self, instance, id_map):
identity = self.field.to_field.python_value(
instance._data[self.foreign_key_attr])
if self.backref:
id_map[identity] = instance
else:
id_map.setdefault(identity, [])
id_map[identity].append(instance)
def prefetch(sq, *subqueries):
if not subqueries:
return sq
fixed_queries = prefetch_add_subquery(sq, subqueries)
deps = {}
rel_map = {}
for prefetch_result in reversed(fixed_queries):
query_model = prefetch_result.model
if prefetch_result.field:
rel_map.setdefault(prefetch_result.rel_model, [])
rel_map[prefetch_result.rel_model].append(prefetch_result)
deps[query_model] = {}
id_map = deps[query_model]
has_relations = bool(rel_map.get(query_model))
for instance in prefetch_result.query:
if prefetch_result.field:
prefetch_result.store_instance(instance, id_map)
if has_relations:
for rel in rel_map[query_model]:
rel.populate_instance(instance, deps[rel.model])
return prefetch_result.query
def create_model_tables(models, **create_table_kwargs):
"""Create tables for all given models (in the right order)."""
for m in sort_models_topologically(models):
m.create_table(**create_table_kwargs)
def drop_model_tables(models, **drop_table_kwargs):
"""Drop tables for all given models (in the right order)."""
for m in reversed(sort_models_topologically(models)):
m.drop_table(**drop_table_kwargs)
def sort_models_topologically(models):
"""Sort models topologically so that parents will precede children."""
models = set(models)
seen = set()
ordering = []
def dfs(model):
if model in models and model not in seen:
seen.add(model)
for foreign_key in model._meta.reverse_rel.values():
dfs(foreign_key.model_class)
ordering.append(model) # parent will follow descendants
# order models by name and table initially to guarantee a total ordering
names = lambda m: (m._meta.name, m._meta.db_table)
for m in sorted(models, key=names, reverse=True):
dfs(m)
return list(reversed(ordering)) # want parents first in output ordering
| ownport/storages | storages/packages/peewee.py | Python | apache-2.0 | 136,615 |
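Taken together, the classes above form the usual peewee workflow: a Database subclass supplies connections and DDL helpers, BaseModel/ModelOptions wire Field instances into Model subclasses, and prefetch() stitches related rows back onto their parents. A minimal sketch of that workflow follows, assuming the module is importable as peewee (in this repository it lives at storages/packages/peewee.py) and that CharField/ForeignKeyField are the field classes defined earlier in the file.

from peewee import SqliteDatabase, Model, CharField, ForeignKeyField, prefetch

db = SqliteDatabase(':memory:')  # SqliteDatabase falls back to ':memory:' anyway

class Author(Model):
    name = CharField()

    class Meta:
        database = db

class Book(Model):
    title = CharField()
    author = ForeignKeyField(Author, related_name='books')

    class Meta:
        database = db

db.create_tables([Author, Book])   # delegates to create_model_tables() above

with db.transaction():             # the transaction context manager above
    ursula = Author.create(name='Le Guin')
    Book.create(title='The Dispossessed', author=ursula)

# prefetch() runs one query per model and attaches children under
# '<related_name>_prefetch' (see PrefetchResult.populate_instance).
for author in prefetch(Author.select(), Book.select()):
    print('%s: %s' % (author.name,
                      [book.title for book in author.books_prefetch]))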
# Copyright 2014 DreamHost, LLC
#
# Author: DreamHost, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import logging
import re
import netaddr
from oslo.config import cfg
from akanda.rug.openstack.common import jsonutils
LOG = logging.getLogger(__name__)
DEFAULT_AS = 64512
OPTIONS = [
cfg.StrOpt('provider_rules_path'),
cfg.IntOpt('asn', default=DEFAULT_AS),
cfg.IntOpt('neighbor_asn', default=DEFAULT_AS),
]
cfg.CONF.register_opts(OPTIONS)
EXTERNAL_NET = 'external'
INTERNAL_NET = 'internal'
MANAGEMENT_NET = 'management'
SERVICE_STATIC = 'static'
SERVICE_DHCP = 'dhcp'
SERVICE_RA = 'ra'
def build_config(client, router, interfaces):
provider_rules = load_provider_rules(cfg.CONF.provider_rules_path)
networks = generate_network_config(client, router, interfaces)
gateway = get_default_v4_gateway(client, router, networks)
return {
'asn': cfg.CONF.asn,
'neighbor_asn': cfg.CONF.neighbor_asn,
'default_v4_gateway': gateway,
'networks': networks,
'labels': provider_rules.get('labels', {}),
'floating_ips': generate_floating_config(router),
'tenant_id': router.tenant_id,
'hostname': router.name
}
def get_default_v4_gateway(client, router, networks):
"""Find the IPv4 default gateway for the router.
"""
LOG.debug('networks = %r', networks)
LOG.debug('external interface = %s', router.external_port.mac_address)
# Now find the subnet that our external IP is on, and return its
# gateway.
for n in networks:
if n['network_type'] == EXTERNAL_NET:
v4_addresses = [
addr
for addr in (netaddr.IPAddress(ip.partition('/')[0])
for ip in n['interface']['addresses'])
if addr.version == 4
]
for s in n['subnets']:
subnet = netaddr.IPNetwork(s['cidr'])
if subnet.version != 4:
continue
LOG.debug(
'%s: checking if subnet %s should have the default route',
router.id, s['cidr'])
for addr in v4_addresses:
if addr in subnet:
LOG.debug(
'%s: found gateway %s for subnet %s on network %s',
router.id,
s['gateway_ip'],
s['cidr'],
n['network_id'],
)
return s['gateway_ip']
# Sometimes we are asked to build a configuration for the server
# when the external interface is still marked as "down". We can
# report that case, but we don't treat it as an error here because
# we'll be asked to do it again when the interface comes up.
LOG.info('%s: no default gateway was found', router.id)
return ''
def load_provider_rules(path):
try:
return jsonutils.load(open(path))
except: # pragma nocover
LOG.exception('unable to open provider rules: %s' % path)
def generate_network_config(client, router, interfaces):
iface_map = dict((i['lladdr'], i['ifname']) for i in interfaces)
retval = [
_network_config(
client,
router.external_port,
iface_map[router.external_port.mac_address],
EXTERNAL_NET),
_management_network_config(
router.management_port,
iface_map[router.management_port.mac_address],
interfaces,
)]
retval.extend(
_network_config(
client,
p,
iface_map[p.mac_address],
INTERNAL_NET,
client.get_network_ports(p.network_id))
for p in router.internal_ports)
return retval
def _management_network_config(port, ifname, interfaces):
for iface in interfaces:
if iface['ifname'] == ifname:
return _make_network_config_dict(
iface, MANAGEMENT_NET, port.network_id)
def _network_config(client, port, ifname, network_type, network_ports=[]):
subnets = client.get_network_subnets(port.network_id)
subnets_dict = dict((s.id, s) for s in subnets)
return _make_network_config_dict(
_interface_config(ifname, port, subnets_dict),
network_type,
port.network_id,
subnets_dict=subnets_dict,
network_ports=network_ports)
def _make_network_config_dict(interface, network_type, network_id,
v4_conf=SERVICE_STATIC, v6_conf=SERVICE_STATIC,
subnets_dict={}, network_ports=[]):
return {'interface': interface,
'network_id': network_id,
'v4_conf_service': v4_conf,
'v6_conf_service': v6_conf,
'network_type': network_type,
'subnets': [_subnet_config(s) for s in subnets_dict.values()],
'allocations': _allocation_config(network_ports, subnets_dict)}
def _interface_config(ifname, port, subnets_dict):
def fmt(fixed):
return '%s/%s' % (fixed.ip_address,
subnets_dict[fixed.subnet_id].cidr.prefixlen)
return {'ifname': ifname,
'addresses': [fmt(fixed) for fixed in port.fixed_ips]}
def _subnet_config(subnet):
return {
'cidr': str(subnet.cidr),
'dhcp_enabled': subnet.enable_dhcp and subnet.ipv6_ra_mode != 'slaac',
'dns_nameservers': subnet.dns_nameservers,
'host_routes': subnet.host_routes,
'gateway_ip': (str(subnet.gateway_ip)
if subnet.gateway_ip is not None
else ''),
}
def _allocation_config(ports, subnets_dict):
r = re.compile('[:.]')
allocations = []
for port in ports:
addrs = {
str(fixed.ip_address): subnets_dict[fixed.subnet_id].enable_dhcp
for fixed in port.fixed_ips
}
if not addrs:
continue
allocations.append(
{
'ip_addresses': addrs,
'device_id': port.device_id,
'hostname': '%s.local' % r.sub('-', sorted(addrs.keys())[0]),
'mac_address': port.mac_address
}
)
return allocations
def generate_floating_config(router):
return [
{'floating_ip': str(fip.floating_ip), 'fixed_ip': str(fip.fixed_ip)}
for fip in router.floating_ips
]
| markmcclain/astara | akanda/rug/api/configuration.py | Python | apache-2.0 | 6,984 |
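The private helpers above are plain functions over Neutron-style port and subnet objects, so they can be exercised in isolation. A small sketch of what _allocation_config produces, using hypothetical namedtuple stand-ins for those objects; it assumes the akanda-rug package (and its oslo.config dependencies) is importable as akanda.rug.api.configuration.

from collections import namedtuple

from akanda.rug.api import configuration

# Hypothetical stand-ins for the Neutron-ish objects the helpers expect.
FixedIp = namedtuple('FixedIp', 'ip_address subnet_id')
Port = namedtuple('Port', 'device_id mac_address fixed_ips')
Subnet = namedtuple('Subnet', 'enable_dhcp')

ports = [Port('dev-1', 'fa:16:3e:00:00:01',
              [FixedIp('192.168.1.10', 'subnet-1')])]
subnets = {'subnet-1': Subnet(enable_dhcp=True)}

# The hostname is the lowest address with ':' and '.' mapped to '-', plus '.local'.
print(configuration._allocation_config(ports, subnets))
# -> [{'ip_addresses': {'192.168.1.10': True},
#      'device_id': 'dev-1',
#      'hostname': '192-168-1-10.local',
#      'mac_address': 'fa:16:3e:00:00:01'}]   (dict key order may vary)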
import os
from setuptools import find_packages
from setuptools import setup
version = '1.0'
install_requires = [
]
tests_require = install_requires + ['Sphinx', 'docutils',
'virtualenv', 'nose', 'coverage']
here = os.path.abspath(os.path.dirname(__file__))
try:
README = open(os.path.join(here, 'README.md')).read()
CHANGES = open(os.path.join(here, 'CHANGES.md')).read()
except IOError:
README = CHANGES = ''
kwargs = dict(
version=version,
name='strava',
description='Python wrapper for the Strava (http://www.strava.com) API',
long_description=README + '\n\n' + CHANGES,
classifiers=[
'Intended Audience :: Developers',
'Programming Language :: Python',
'License :: OSI Approved :: Apache Software License',
],
install_requires=install_requires,
license='Apache',
packages=find_packages(),
include_package_data=True,
zip_safe=True,
tests_require=tests_require,
test_suite='strava.tests',
url='https://github.com/Packetslave/strava',
author='Brian Landers',
author_email='[email protected]',
entry_points="""\
"""
)
setup(**kwargs)
| Packetslave/strava | setup.py | Python | apache-2.0 | 1,182 |
"""Classification training"""
import os, sys
sys.path.append('../')
sys.path.append('../models/')
sys.path.append('../util/')
import time
import json
import importlib
import argparse
import tensorflow as tf
import numpy as np
from input_data import Data
def optimization(learning_rate, loss):
"""Defines the optimization operation"""
return tf.train.AdamOptimizer(learning_rate).minimize(loss)
def cross_entropy(logits, labels):
"""Defines loss function"""
cross_entropy = tf.nn.softmax_cross_entropy_with_logits(labels=labels, logits=logits)
return tf.reduce_mean(cross_entropy)
def compute_accuracy(logits, labels):
"""Computes classification accuracy"""
correct_prediction = tf.equal(tf.argmax(logits,1), tf.argmax(labels,1))
return tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
def main():
parser = argparse.ArgumentParser("Classification training.")
parser.add_argument('model_config_file')
parser.add_argument('num_iters', type=int)
parser.add_argument('learning_rate', type=float)
parser.add_argument('--model_snapshot_dir', default='../snapshots')
args = parser.parse_args()
# Loads config file
config = json.load(open(args.model_config_file, 'r'))
model_name = config['model']
input_size_w = config['input_size']['x']
input_size_h = config['input_size']['y']
input_channels = config['input_size']['channel']
class_count = config['class_count']
batch_size = config['batch_size']
dropout = config['dropout']
# Loading data
train_data = Data(config['data']['train'])
valid_data = Data(config['data']['valid'])
train_data.shuffle_data()
# Snapshot setup
snapshot_filename = model_name + '.npy'
snapshot_path = os.path.join(args.model_snapshot_dir,
model_name,
snapshot_filename)
# Importing and creating model
model_module = importlib.import_module(model_name)
model = model_module.get_model(class_count,
input_size_w=input_size_w,
input_size_h=input_size_h,
is_training=True)
# Define loss, training, and validation functions
loss = cross_entropy(model.logits, model.labels)
train_step = optimization(args.learning_rate, loss)
accuracy = compute_accuracy(model.logits, model.labels)
# Session and variable initialization
sess = tf.InteractiveSession()
sess.run(tf.global_variables_initializer())
# Training code here
print('Begin Training')
elapsed_time = 0.0
initial_time = time.time()
for i in range(args.num_iters):
i += 1
train_images, train_labels = train_data.next_batch(batch_size)
train_step.run(feed_dict={model.x: train_images,
model.labels: train_labels,
model.dropout_prob: dropout})
if i % 100 == 0:
step_time = time.time() - initial_time
print(i, 'iterations: took {0:.2f}s'.format(step_time))
elapsed_time += step_time
initial_time = time.time()
if i % 500 == 0:
valid_images, valid_labels = valid_data.next_batch(batch_size)
acc = accuracy.eval(feed_dict={model.x: valid_images,
model.labels: valid_labels,
model.dropout_prob: 1.0})
print('Training accuracy: {0:.2f}%'.format(acc * 100))
print('Saving snapshot to', snapshot_path)
model.save_params(sess, snapshot_path)
print('Done')
print('Took {0:.2f}s'.format(elapsed_time))
if __name__ == "__main__":
main()
| Lazea/TensorFlow | classification/classification_train.py | Python | apache-2.0 | 3,781 |
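The trainer above takes every hyperparameter from the JSON file named on the command line; a config that satisfies the keys read in main() could look like the following sketch (the model name, sizes, and paths are illustrative, not part of the original project).

import json

config = {
    "model": "simple_cnn",                 # module importlib loads from ../models/
    "input_size": {"x": 64, "y": 64, "channel": 3},
    "class_count": 10,
    "batch_size": 32,
    "dropout": 0.5,
    "data": {
        "train": "../data/train",          # handed to input_data.Data(...)
        "valid": "../data/valid",
    },
}

with open("simple_cnn.json", "w") as f:
    json.dump(config, f, indent=2)

# usage: python classification_train.py simple_cnn.json 10000 1e-4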