text | meta
---|---
import webob
from nova.api.openstack import extensions
from nova.api.openstack import wsgi
from nova.consoleauth import rpcapi as consoleauth_rpcapi
from nova.openstack.common.gettextutils import _
authorize = extensions.extension_authorizer('compute', 'console_auth_tokens')
class ConsoleAuthTokensController(wsgi.Controller):
def __init__(self, *args, **kwargs):
self._consoleauth_rpcapi = consoleauth_rpcapi.ConsoleAuthAPI()
super(ConsoleAuthTokensController, self).__init__(*args, **kwargs)
def show(self, req, id):
"""Checks a console auth token and returns the related connect info."""
context = req.environ['nova.context']
authorize(context)
token = id
connect_info = self._consoleauth_rpcapi.check_token(context, token)
if not connect_info:
raise webob.exc.HTTPNotFound(explanation=_("Token not found"))
console_type = connect_info.get('console_type')
# This is currently required only for RDP consoles
if console_type != "rdp-html5":
raise webob.exc.HTTPUnauthorized(
explanation=_("The requested console type details are not "
"accessible"))
return {'console':
dict([(i, connect_info[i])
for i in ['instance_uuid', 'host', 'port',
'internal_access_path']
if i in connect_info])}
class Console_auth_tokens(extensions.ExtensionDescriptor):
"""Console token authentication support."""
name = "ConsoleAuthTokens"
alias = "os-console-auth-tokens"
namespace = ("http://docs.openstack.org/compute/ext/"
"consoles-auth-tokens/api/v2")
updated = "2013-08-13T00:00:00+00:00"
def get_resources(self):
controller = ConsoleAuthTokensController()
ext = extensions.ResourceExtension('os-console-auth-tokens',
controller)
return [ext]
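# Illustrative sketch only (not part of the original extension; the token value and
# tenant id are placeholders): a request such as
#   GET /v2/{tenant_id}/os-console-auth-tokens/{token}
# is routed to ConsoleAuthTokensController.show(). For a valid token belonging to an
# "rdp-html5" console it returns a body shaped like
#   {"console": {"instance_uuid": ..., "host": ..., "port": ...,
#                "internal_access_path": ...}}
# while unknown tokens raise HTTPNotFound and non-RDP console types raise
# HTTPUnauthorized, as implemented above.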
| {
"content_hash": "c1fc5a87682c902739871f90bb996631",
"timestamp": "",
"source": "github",
"line_count": 53,
"max_line_length": 79,
"avg_line_length": 37.9622641509434,
"alnum_prop": 0.6232604373757455,
"repo_name": "ycl2045/nova-master",
"id": "c6aa9e07a5c22e7cbbbc836cc1a9825437679137",
"size": "2651",
"binary": false,
"copies": "10",
"ref": "refs/heads/master",
"path": "nova/api/openstack/compute/contrib/console_auth_tokens.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Perl",
"bytes": "2035"
},
{
"name": "Python",
"bytes": "13677408"
},
{
"name": "R",
"bytes": "7817"
},
{
"name": "Ruby",
"bytes": "851"
},
{
"name": "Shell",
"bytes": "14571"
}
],
"symlink_target": ""
} |
import io
import json
import os
import unittest
from . import episodeofcare
from .fhirdate import FHIRDate
class EpisodeOfCareTests(unittest.TestCase):
def instantiate_from(self, filename):
datadir = os.environ.get('FHIR_UNITTEST_DATADIR') or ''
with io.open(os.path.join(datadir, filename), 'r', encoding='utf-8') as handle:
js = json.load(handle)
self.assertEqual("EpisodeOfCare", js["resourceType"])
return episodeofcare.EpisodeOfCare(js)
def testEpisodeOfCare1(self):
inst = self.instantiate_from("episodeofcare-example.json")
self.assertIsNotNone(inst, "Must have instantiated an EpisodeOfCare instance")
self.implEpisodeOfCare1(inst)
js = inst.as_json()
self.assertEqual("EpisodeOfCare", js["resourceType"])
inst2 = episodeofcare.EpisodeOfCare(js)
self.implEpisodeOfCare1(inst2)
def implEpisodeOfCare1(self, inst):
self.assertEqual(inst.diagnosis[0].rank, 1)
self.assertEqual(inst.diagnosis[0].role.coding[0].code, "CC")
self.assertEqual(inst.diagnosis[0].role.coding[0].display, "Chief complaint")
self.assertEqual(inst.diagnosis[0].role.coding[0].system, "http://hl7.org/fhir/diagnosis-role")
self.assertEqual(inst.id, "example")
self.assertEqual(inst.identifier[0].system, "http://example.org/sampleepisodeofcare-identifier")
self.assertEqual(inst.identifier[0].value, "123")
self.assertEqual(inst.period.start.date, FHIRDate("2014-09-01").date)
self.assertEqual(inst.period.start.as_json(), "2014-09-01")
self.assertEqual(inst.status, "active")
self.assertEqual(inst.statusHistory[0].period.end.date, FHIRDate("2014-09-14").date)
self.assertEqual(inst.statusHistory[0].period.end.as_json(), "2014-09-14")
self.assertEqual(inst.statusHistory[0].period.start.date, FHIRDate("2014-09-01").date)
self.assertEqual(inst.statusHistory[0].period.start.as_json(), "2014-09-01")
self.assertEqual(inst.statusHistory[0].status, "planned")
self.assertEqual(inst.statusHistory[1].period.end.date, FHIRDate("2014-09-21").date)
self.assertEqual(inst.statusHistory[1].period.end.as_json(), "2014-09-21")
self.assertEqual(inst.statusHistory[1].period.start.date, FHIRDate("2014-09-15").date)
self.assertEqual(inst.statusHistory[1].period.start.as_json(), "2014-09-15")
self.assertEqual(inst.statusHistory[1].status, "active")
self.assertEqual(inst.statusHistory[2].period.end.date, FHIRDate("2014-09-24").date)
self.assertEqual(inst.statusHistory[2].period.end.as_json(), "2014-09-24")
self.assertEqual(inst.statusHistory[2].period.start.date, FHIRDate("2014-09-22").date)
self.assertEqual(inst.statusHistory[2].period.start.as_json(), "2014-09-22")
self.assertEqual(inst.statusHistory[2].status, "onhold")
self.assertEqual(inst.statusHistory[3].period.start.date, FHIRDate("2014-09-25").date)
self.assertEqual(inst.statusHistory[3].period.start.as_json(), "2014-09-25")
self.assertEqual(inst.statusHistory[3].status, "active")
self.assertEqual(inst.text.status, "generated")
self.assertEqual(inst.type[0].coding[0].code, "hacc")
self.assertEqual(inst.type[0].coding[0].display, "Home and Community Care")
self.assertEqual(inst.type[0].coding[0].system, "http://hl7.org/fhir/episodeofcare-type")
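# Usage note (an assumption about the surrounding test harness, not part of the
# generated module): instantiate_from() resolves filenames against the
# FHIR_UNITTEST_DATADIR environment variable, so a local run would look like
#   FHIR_UNITTEST_DATADIR=/path/to/fhir-examples python -m unittest episodeofcare_tests
# where the directory contains episodeofcare-example.json.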
| {
"content_hash": "f0d38088021500abe6e69fb88e122ad1",
"timestamp": "",
"source": "github",
"line_count": 61,
"max_line_length": 104,
"avg_line_length": 57.08196721311475,
"alnum_prop": 0.6875358989086732,
"repo_name": "all-of-us/raw-data-repository",
"id": "f5b6f7449fb011de0ca2d45281b55fc6adbc58e9",
"size": "3608",
"binary": false,
"copies": "1",
"ref": "refs/heads/devel",
"path": "rdr_service/lib_fhir/fhirclient_3_0_0/models/episodeofcare_tests.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "1866"
},
{
"name": "Mako",
"bytes": "1715"
},
{
"name": "Python",
"bytes": "17040924"
},
{
"name": "R",
"bytes": "2212"
},
{
"name": "Shell",
"bytes": "92213"
}
],
"symlink_target": ""
} |
"""
Performs exponentiation, similarly to the built-in pow() function or the ** operator.
Also allows calculating the result modulo a given value.
"""
def power(a: int, n: int, mod: int = None):
"""
Iterative version of binary exponentiation
Calculate a ^ n
if mod is specified, return the result modulo mod
Time Complexity : O(log(n))
Space Complexity : O(1)
"""
ans = 1
while n:
if n & 1:
ans = ans * a
a = a * a
if mod:
ans %= mod
a %= mod
n >>= 1
return ans
def power_recur(a: int, n: int, mod: int = None):
"""
Recursive version of binary exponentiation
Calculate a ^ n
if mod is specified, return the result modulo mod
Time Complexity : O(log(n))
Space Complexity : O(log(n))
"""
if n == 0:
ans = 1
elif n == 1:
ans = a
else:
ans = power_recur(a, n // 2, mod)
ans = ans * ans
if n % 2:
ans = ans * a
if mod:
ans %= mod
return ans
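# A minimal self-check (not part of the original module): both variants are
# compared against Python's built-in pow(), which takes the same
# (base, exponent, modulus) arguments.
if __name__ == "__main__":
    assert power(3, 13) == pow(3, 13) == 1594323
    assert power(3, 13, 1000) == pow(3, 13, 1000) == 323
    assert power_recur(3, 13) == 1594323
    assert power_recur(3, 13, 1000) == 323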
| {
"content_hash": "ba4f7696c12d18872893a07b435e26dd",
"timestamp": "",
"source": "github",
"line_count": 48,
"max_line_length": 73,
"avg_line_length": 21.791666666666668,
"alnum_prop": 0.5210325047801148,
"repo_name": "keon/algorithms",
"id": "70d8587de4af16ef89b050ae12048a73d743ea26",
"size": "1046",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "algorithms/maths/power.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "697310"
}
],
"symlink_target": ""
} |
from a10sdk.common.A10BaseClass import A10BaseClass
class PriorityList(A10BaseClass):
"""This class does not support CRUD Operations please use parent.
:param priority: {"description": "Set priority for Designated Router election (Priority value)", "format": "number", "default": 64, "maximum": 127, "minimum": 0, "type": "number"}
:param level: {"enum": ["level-1", "level-2"], "type": "string", "description": "'level-1': Specify priority for level-1 routing; 'level-2': Specify priority for level-2 routing; ", "format": "enum"}
:param DeviceProxy: The device proxy for REST operations and session handling. Refer to `common/device_proxy.py`
"""
def __init__(self, **kwargs):
self.ERROR_MSG = ""
self.b_key = "priority-list"
self.DeviceProxy = ""
self.priority = ""
self.level = ""
for keys, value in kwargs.items():
setattr(self,keys, value)
class HelloIntervalMinimalList(A10BaseClass):
"""This class does not support CRUD Operations please use parent.
:param hello_interval_minimal: {"default": 0, "type": "number", "description": "Set Hello holdtime 1 second, interval depends on multiplier", "format": "flag"}
:param level: {"enum": ["level-1", "level-2"], "type": "string", "description": "'level-1': Specify hello-interval for level-1 IIHs; 'level-2': Specify hello-interval for level-2 IIHs; ", "format": "enum"}
:param DeviceProxy: The device proxy for REST operations and session handling. Refer to `common/device_proxy.py`
"""
def __init__(self, **kwargs):
self.ERROR_MSG = ""
self.b_key = "hello-interval-minimal-list"
self.DeviceProxy = ""
self.hello_interval_minimal = ""
self.level = ""
for keys, value in kwargs.items():
setattr(self,keys, value)
class MeshGroup(A10BaseClass):
"""This class does not support CRUD Operations please use parent.
:param value: {"description": "Mesh group number", "format": "number", "maximum": 4294967295, "minimum": 1, "not": "blocked", "type": "number"}
:param blocked: {"default": 0, "not": "value", "type": "number", "description": "Block LSPs on this interface", "format": "flag"}
:param DeviceProxy: The device proxy for REST operations and session handling. Refer to `common/device_proxy.py`
"""
def __init__(self, **kwargs):
self.ERROR_MSG = ""
self.b_key = "mesh-group"
self.DeviceProxy = ""
self.value = ""
self.blocked = ""
for keys, value in kwargs.items():
setattr(self,keys, value)
class BfdCfg(A10BaseClass):
"""This class does not support CRUD Operations please use parent.
:param disable: {"default": 0, "type": "number", "description": "Disable BFD", "format": "flag"}
:param bfd: {"default": 0, "type": "number", "description": "Bidirectional Forwarding Detection (BFD)", "format": "flag"}
:param DeviceProxy: The device proxy for REST operations and session handling. Refer to `common/device_proxy.py`
"""
def __init__(self, **kwargs):
self.ERROR_MSG = ""
self.b_key = "bfd-cfg"
self.DeviceProxy = ""
self.disable = ""
self.bfd = ""
for keys, value in kwargs.items():
setattr(self,keys, value)
class PasswordList(A10BaseClass):
"""This class does not support CRUD Operations please use parent.
:param password: {"minLength": 1, "maxLength": 254, "type": "string", "description": "Configure the authentication password for interface", "format": "string-rlx"}
:param level: {"enum": ["level-1", "level-2"], "type": "string", "description": "'level-1': Specify password for level-1 PDUs; 'level-2': Specify password for level-2 PDUs; ", "format": "enum"}
:param DeviceProxy: The device proxy for REST operations and session handling. Refer to `common/device_proxy.py`
"""
def __init__(self, **kwargs):
self.ERROR_MSG = ""
self.b_key = "password-list"
self.DeviceProxy = ""
self.password = ""
self.level = ""
for keys, value in kwargs.items():
setattr(self,keys, value)
class KeyChainList(A10BaseClass):
"""This class does not support CRUD Operations please use parent.
:param key_chain: {"minLength": 1, "maxLength": 128, "type": "string", "description": "Authentication key-chain (Name of key-chain)", "format": "string"}
:param level: {"enum": ["level-1", "level-2"], "type": "string", "description": "'level-1': Specify authentication key-chain for level-1 PDUs; 'level-2': Specify authentication key-chain for level-2 PDUs; ", "format": "enum"}
:param DeviceProxy: The device proxy for REST operations and session handling. Refer to `common/device_proxy.py`
"""
def __init__(self, **kwargs):
self.ERROR_MSG = ""
self.b_key = "key-chain-list"
self.DeviceProxy = ""
self.key_chain = ""
self.level = ""
for keys, value in kwargs.items():
setattr(self,keys, value)
class ModeList(A10BaseClass):
"""This class does not support CRUD Operations please use parent.
:param mode: {"enum": ["md5"], "type": "string", "description": "'md5': Keyed message digest; ", "format": "enum"}
:param level: {"enum": ["level-1", "level-2"], "type": "string", "description": "'level-1': Specify authentication mode for level-1 PDUs; 'level-2': Specify authentication mode for level-2 PDUs; ", "format": "enum"}
:param DeviceProxy: The device proxy for REST operations and session handling. Refer to `common/device_proxy.py`
"""
def __init__(self, **kwargs):
self.ERROR_MSG = ""
self.b_key = "mode-list"
self.DeviceProxy = ""
self.mode = ""
self.level = ""
for keys, value in kwargs.items():
setattr(self,keys, value)
class SendOnlyList(A10BaseClass):
"""This class does not support CRUD Operations please use parent.
:param send_only: {"default": 0, "type": "number", "description": "Authentication send-only", "format": "flag"}
:param level: {"enum": ["level-1", "level-2"], "type": "string", "description": "'level-1': Specify authentication send-only for level-1 PDUs; 'level-2': Specify authentication send-only for level-2 PDUs; ", "format": "enum"}
:param DeviceProxy: The device proxy for REST operations and session handling. Refer to `common/device_proxy.py`
"""
def __init__(self, **kwargs):
self.ERROR_MSG = ""
self.b_key = "send-only-list"
self.DeviceProxy = ""
self.send_only = ""
self.level = ""
for keys, value in kwargs.items():
setattr(self,keys, value)
class Authentication(A10BaseClass):
"""This class does not support CRUD Operations please use parent.
:param key_chain_list: {"minItems": 1, "items": {"type": "object"}, "uniqueItems": true, "type": "array", "array": [{"properties": {"key-chain": {"minLength": 1, "maxLength": 128, "type": "string", "description": "Authentication key-chain (Name of key-chain)", "format": "string"}, "optional": true, "level": {"enum": ["level-1", "level-2"], "type": "string", "description": "'level-1': Specify authentication key-chain for level-1 PDUs; 'level-2': Specify authentication key-chain for level-2 PDUs; ", "format": "enum"}}}]}
:param mode_list: {"minItems": 1, "items": {"type": "object"}, "uniqueItems": true, "type": "array", "array": [{"properties": {"optional": true, "mode": {"enum": ["md5"], "type": "string", "description": "'md5': Keyed message digest; ", "format": "enum"}, "level": {"enum": ["level-1", "level-2"], "type": "string", "description": "'level-1': Specify authentication mode for level-1 PDUs; 'level-2': Specify authentication mode for level-2 PDUs; ", "format": "enum"}}}]}
:param send_only_list: {"minItems": 1, "items": {"type": "object"}, "uniqueItems": true, "type": "array", "array": [{"properties": {"send-only": {"default": 0, "type": "number", "description": "Authentication send-only", "format": "flag"}, "optional": true, "level": {"enum": ["level-1", "level-2"], "type": "string", "description": "'level-1': Specify authentication send-only for level-1 PDUs; 'level-2': Specify authentication send-only for level-2 PDUs; ", "format": "enum"}}}]}
:param DeviceProxy: The device proxy for REST operations and session handling. Refer to `common/device_proxy.py`
"""
def __init__(self, **kwargs):
self.ERROR_MSG = ""
self.b_key = "authentication"
self.DeviceProxy = ""
self.key_chain_list = []
self.mode_list = []
self.send_only_list = []
for keys, value in kwargs.items():
setattr(self,keys, value)
class WideMetricList(A10BaseClass):
"""This class does not support CRUD Operations please use parent.
:param wide_metric: {"description": "Configure the wide metric for interface", "format": "number", "default": 10, "maximum": 16777214, "minimum": 1, "type": "number"}
:param level: {"enum": ["level-1", "level-2"], "type": "string", "description": "'level-1': Apply metric to level-1 links; 'level-2': Apply metric to level-2 links; ", "format": "enum"}
:param DeviceProxy: The device proxy for REST operations and session handling. Refer to `common/device_proxy.py`
"""
def __init__(self, **kwargs):
self.ERROR_MSG = ""
self.b_key = "wide-metric-list"
self.DeviceProxy = ""
self.wide_metric = ""
self.level = ""
for keys, value in kwargs.items():
setattr(self,keys, value)
class HelloIntervalList(A10BaseClass):
"""This class does not support CRUD Operations please use parent.
:param hello_interval: {"description": "Set Hello interval in seconds (Hello interval value)", "format": "number", "default": 10, "maximum": 65535, "minimum": 1, "type": "number"}
:param level: {"enum": ["level-1", "level-2"], "type": "string", "description": "'level-1': Specify hello-interval for level-1 IIHs; 'level-2': Specify hello-interval for level-2 IIHs; ", "format": "enum"}
:param DeviceProxy: The device proxy for REST operations and session handling. Refer to `common/device_proxy.py`
"""
def __init__(self, **kwargs):
self.ERROR_MSG = ""
self.b_key = "hello-interval-list"
self.DeviceProxy = ""
self.hello_interval = ""
self.level = ""
for keys, value in kwargs.items():
setattr(self,keys, value)
class HelloMultiplierList(A10BaseClass):
"""This class does not support CRUD Operations please use parent.
:param hello_multiplier: {"description": "Set multiplier for Hello holding time (Hello multiplier value)", "format": "number", "default": 3, "maximum": 100, "minimum": 2, "type": "number"}
:param level: {"enum": ["level-1", "level-2"], "type": "string", "description": "'level-1': Specify hello multiplier for level-1 IIHs; 'level-2': Specify hello multiplier for level-2 IIHs; ", "format": "enum"}
:param DeviceProxy: The device proxy for REST operations and session handling. Refer to `common/device_proxy.py`
"""
def __init__(self, **kwargs):
self.ERROR_MSG = ""
self.b_key = "hello-multiplier-list"
self.DeviceProxy = ""
self.hello_multiplier = ""
self.level = ""
for keys, value in kwargs.items():
setattr(self,keys, value)
class MetricList(A10BaseClass):
"""This class does not support CRUD Operations please use parent.
:param metric: {"description": "Configure the metric for interface (Default metric)", "format": "number", "default": 10, "maximum": 63, "minimum": 1, "type": "number"}
:param level: {"enum": ["level-1", "level-2"], "type": "string", "description": "'level-1': Apply metric to level-1 links; 'level-2': Apply metric to level-2 links; ", "format": "enum"}
:param DeviceProxy: The device proxy for REST operations and session handling. Refer to `common/device_proxy.py`
"""
def __init__(self, **kwargs):
self.ERROR_MSG = ""
self.b_key = "metric-list"
self.DeviceProxy = ""
self.metric = ""
self.level = ""
for keys, value in kwargs.items():
setattr(self,keys, value)
class CsnpIntervalList(A10BaseClass):
"""This class does not support CRUD Operations please use parent.
:param level: {"enum": ["level-1", "level-2"], "type": "string", "description": "'level-1': Specify interval for level-1 CSNPs; 'level-2': Specify interval for level-2 CSNPs; ", "format": "enum"}
:param csnp_interval: {"description": "Set CSNP interval in seconds (CSNP interval value)", "format": "number", "default": 10, "maximum": 65535, "minimum": 1, "type": "number"}
:param DeviceProxy: The device proxy for REST operations and session handling. Refer to `common/device_proxy.py`
"""
def __init__(self, **kwargs):
self.ERROR_MSG = ""
self.b_key = "csnp-interval-list"
self.DeviceProxy = ""
self.level = ""
self.csnp_interval = ""
for keys, value in kwargs.items():
setattr(self,keys, value)
class Isis(A10BaseClass):
"""Class Description::
ISIS.
Class isis supports CRUD Operations and inherits from `common/A10BaseClass`.
This class is the `"PARENT"` class for this module.
:param priority_list: {"minItems": 1, "items": {"type": "object"}, "uniqueItems": true, "type": "array", "array": [{"properties": {"priority": {"description": "Set priority for Designated Router election (Priority value)", "format": "number", "default": 64, "maximum": 127, "minimum": 0, "type": "number"}, "optional": true, "level": {"enum": ["level-1", "level-2"], "type": "string", "description": "'level-1': Specify priority for level-1 routing; 'level-2': Specify priority for level-2 routing; ", "format": "enum"}}}]}
:param retransmit_interval: {"description": "Set per-LSP retransmission interval (Interval between retransmissions of the same LSP (seconds))", "format": "number", "default": 5, "optional": true, "maximum": 65535, "minimum": 0, "type": "number"}
:param hello_interval_minimal_list: {"minItems": 1, "items": {"type": "object"}, "uniqueItems": true, "type": "array", "array": [{"properties": {"hello-interval-minimal": {"default": 0, "type": "number", "description": "Set Hello holdtime 1 second, interval depends on multiplier", "format": "flag"}, "optional": true, "level": {"enum": ["level-1", "level-2"], "type": "string", "description": "'level-1': Specify hello-interval for level-1 IIHs; 'level-2': Specify hello-interval for level-2 IIHs; ", "format": "enum"}}}]}
:param network: {"optional": true, "enum": ["broadcast", "point-to-point"], "type": "string", "description": "'broadcast': Specify IS-IS broadcast multi-access network; 'point-to-point': Specify IS-IS point-to-point network; ", "format": "enum"}
:param lsp_interval: {"description": "Set LSP transmission interval (LSP transmission interval (milliseconds))", "format": "number", "default": 33, "optional": true, "maximum": 4294967295, "minimum": 1, "type": "number"}
:param padding: {"default": 1, "optional": true, "type": "number", "description": "Add padding to IS-IS hello packets", "format": "flag"}
:param password_list: {"minItems": 1, "items": {"type": "object"}, "uniqueItems": true, "type": "array", "array": [{"properties": {"password": {"minLength": 1, "maxLength": 254, "type": "string", "description": "Configure the authentication password for interface", "format": "string-rlx"}, "optional": true, "level": {"enum": ["level-1", "level-2"], "type": "string", "description": "'level-1': Specify password for level-1 PDUs; 'level-2': Specify password for level-2 PDUs; ", "format": "enum"}}}]}
:param wide_metric_list: {"minItems": 1, "items": {"type": "object"}, "uniqueItems": true, "type": "array", "array": [{"properties": {"optional": true, "wide-metric": {"description": "Configure the wide metric for interface", "format": "number", "default": 10, "maximum": 16777214, "minimum": 1, "type": "number"}, "level": {"enum": ["level-1", "level-2"], "type": "string", "description": "'level-1': Apply metric to level-1 links; 'level-2': Apply metric to level-2 links; ", "format": "enum"}}}]}
:param hello_interval_list: {"minItems": 1, "items": {"type": "object"}, "uniqueItems": true, "type": "array", "array": [{"properties": {"optional": true, "hello-interval": {"description": "Set Hello interval in seconds (Hello interval value)", "format": "number", "default": 10, "maximum": 65535, "minimum": 1, "type": "number"}, "level": {"enum": ["level-1", "level-2"], "type": "string", "description": "'level-1': Specify hello-interval for level-1 IIHs; 'level-2': Specify hello-interval for level-2 IIHs; ", "format": "enum"}}}]}
:param circuit_type: {"description": "'level-1': Level-1 only adjacencies are formed; 'level-1-2': Level-1-2 adjacencies are formed; 'level-2-only': Level-2 only adjacencies are formed; ", "format": "enum", "default": "level-1-2", "type": "string", "enum": ["level-1", "level-1-2", "level-2-only"], "optional": true}
:param hello_multiplier_list: {"minItems": 1, "items": {"type": "object"}, "uniqueItems": true, "type": "array", "array": [{"properties": {"optional": true, "hello-multiplier": {"description": "Set multiplier for Hello holding time (Hello multiplier value)", "format": "number", "default": 3, "maximum": 100, "minimum": 2, "type": "number"}, "level": {"enum": ["level-1", "level-2"], "type": "string", "description": "'level-1': Specify hello multiplier for level-1 IIHs; 'level-2': Specify hello multiplier for level-2 IIHs; ", "format": "enum"}}}]}
:param metric_list: {"minItems": 1, "items": {"type": "object"}, "uniqueItems": true, "type": "array", "array": [{"properties": {"metric": {"description": "Configure the metric for interface (Default metric)", "format": "number", "default": 10, "maximum": 63, "minimum": 1, "type": "number"}, "optional": true, "level": {"enum": ["level-1", "level-2"], "type": "string", "description": "'level-1': Apply metric to level-1 links; 'level-2': Apply metric to level-2 links; ", "format": "enum"}}}]}
:param csnp_interval_list: {"minItems": 1, "items": {"type": "object"}, "uniqueItems": true, "type": "array", "array": [{"properties": {"level": {"enum": ["level-1", "level-2"], "type": "string", "description": "'level-1': Specify interval for level-1 CSNPs; 'level-2': Specify interval for level-2 CSNPs; ", "format": "enum"}, "optional": true, "csnp-interval": {"description": "Set CSNP interval in seconds (CSNP interval value)", "format": "number", "default": 10, "maximum": 65535, "minimum": 1, "type": "number"}}}]}
:param uuid: {"description": "uuid of the object", "format": "string", "minLength": 1, "modify-not-allowed": 1, "optional": true, "maxLength": 64, "type": "string"}
:param DeviceProxy: The device proxy for REST operations and session handling. Refer to `common/device_proxy.py`
URL for this object::
`https://<Hostname|Ip address>//axapi/v3/interface/trunk/{ifnum}/isis`.
"""
def __init__(self, **kwargs):
self.ERROR_MSG = ""
self.required=[]
self.b_key = "isis"
self.a10_url="/axapi/v3/interface/trunk/{ifnum}/isis"
self.DeviceProxy = ""
self.priority_list = []
self.retransmit_interval = ""
self.hello_interval_minimal_list = []
self.mesh_group = {}
self.network = ""
self.bfd_cfg = {}
self.lsp_interval = ""
self.padding = ""
self.password_list = []
self.authentication = {}
self.wide_metric_list = []
self.hello_interval_list = []
self.circuit_type = ""
self.hello_multiplier_list = []
self.metric_list = []
self.csnp_interval_list = []
self.uuid = ""
for keys, value in kwargs.items():
setattr(self,keys, value)
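# Hedged usage sketch (not part of the generated module; device_proxy is an
# assumed, already-authenticated session object and the exact payload shapes may
# differ): the parent Isis object is built from keyword arguments matching the
# schema fields documented above, e.g.
#   isis = Isis(circuit_type="level-1-2",
#               csnp_interval_list=[{"csnp-interval": 30, "level": "level-1"}],
#               DeviceProxy=device_proxy)
# and REST operations are issued against self.a10_url, i.e.
#   /axapi/v3/interface/trunk/{ifnum}/isis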
| {
"content_hash": "3efc73ca6ba574adc91300cb9a397acb",
"timestamp": "",
"source": "github",
"line_count": 402,
"max_line_length": 554,
"avg_line_length": 50.73134328358209,
"alnum_prop": 0.6176816710797294,
"repo_name": "amwelch/a10sdk-python",
"id": "d566db871c72c046e468b50e3fd9ca34875ae1fd",
"size": "20394",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "a10sdk/core/interface/interface_trunk_isis.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "6956398"
}
],
"symlink_target": ""
} |
import pymc
import testmodel
import matplotlib.pyplot as plt
#load the testmodel
R = pymc.MCMC(testmodel, db='pickle', dbname='test.pickle')
# populate and run it
R.sample(iter=100000, burn=10000, thin=2)
print 'a:', R.a.stats()
print 'b:', R.b.stats()
print 'c:', R.c.stats()
print 'd:', R.d.stats()
#generate plots
pymc.Matplot.plot(R)
#close MCMC to write database
R.db.close()
#generate a comparison plot
fig = plt.figure()
plt.errorbar(testmodel.x, testmodel.f, yerr=testmodel.noise, label='data', ls='None', marker='D')
plt.plot(testmodel.x,
R.a.stats()['quantiles'][50]*testmodel.x**2 \
+ R.b.stats()['quantiles'][50]*testmodel.x \
+ R.c.stats()['quantiles'][50] \
+ R.d.stats()['quantiles'][50],
'g-',
label='Bayesian model fitting')
plt.plot(testmodel.x,
testmodel.z[0]*testmodel.x**2 + testmodel.z[1]*testmodel.x + testmodel.z[2],
'r--',
label=r'$\chi^{2}$ minimization')
plt.legend()
plt.savefig('test.pdf')
| {
"content_hash": "653e481704f48c4f0b2791f7fea358a6",
"timestamp": "",
"source": "github",
"line_count": 37,
"max_line_length": 97,
"avg_line_length": 27.16216216216216,
"alnum_prop": 0.6288557213930348,
"repo_name": "sniemi/SamPy",
"id": "bb06bc0fce57b41bb2790f7ee9861e43cec8b434",
"size": "1005",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "statistics/bayesian/calltestmodel.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Batchfile",
"bytes": "296"
},
{
"name": "C",
"bytes": "68436"
},
{
"name": "C++",
"bytes": "45956"
},
{
"name": "CSS",
"bytes": "35570"
},
{
"name": "Fortran",
"bytes": "45191"
},
{
"name": "HTML",
"bytes": "107435"
},
{
"name": "IDL",
"bytes": "13651"
},
{
"name": "JavaScript",
"bytes": "25435"
},
{
"name": "Makefile",
"bytes": "26035"
},
{
"name": "Matlab",
"bytes": "1508"
},
{
"name": "Perl",
"bytes": "59198"
},
{
"name": "PostScript",
"bytes": "1403536"
},
{
"name": "Prolog",
"bytes": "16061"
},
{
"name": "Python",
"bytes": "5763358"
},
{
"name": "R",
"bytes": "208346"
},
{
"name": "Rebol",
"bytes": "161"
},
{
"name": "Roff",
"bytes": "73616"
},
{
"name": "Ruby",
"bytes": "2032"
},
{
"name": "Shell",
"bytes": "41512"
},
{
"name": "Tcl",
"bytes": "44150"
},
{
"name": "TeX",
"bytes": "107783"
}
],
"symlink_target": ""
} |
import re
from models import Package
from models import ModulePackageState
from constants import PackageState
from base import BaseSoftwarePackageParser, BaseInventoryParser
from models import get_db_session_logger
class EXRSoftwarePackageParser(BaseSoftwarePackageParser):
def set_host_packages_from_cli(self, ctx):
admin_inactive_packages = {}
admin_active_packages = {}
admin_committed_packages = {}
non_admin_inactive_packages = {}
non_admin_active_packages = {}
non_admin_committed_packages = {}
inactive_packages = {}
active_packages = {}
committed_packages = {}
host_packages = []
cli_show_install_inactive = ctx.load_data('cli_show_install_inactive')
cli_show_install_active = ctx.load_data('cli_show_install_active')
cli_show_install_committed = ctx.load_data('cli_show_install_committed')
cli_admin_show_install_inactive = ctx.load_data('cli_admin_show_install_inactive')
cli_admin_show_install_active = ctx.load_data('cli_admin_show_install_active')
cli_admin_show_install_committed = ctx.load_data('cli_admin_show_install_committed')
# Handles Inactive Packages
if isinstance(cli_admin_show_install_inactive, list):
admin_inactive_packages = self.parse_inactive(cli_admin_show_install_inactive[0], PackageState.INACTIVE)
if isinstance(cli_show_install_inactive, list):
non_admin_inactive_packages = self.parse_inactive(cli_show_install_inactive[0], PackageState.INACTIVE)
inactive_packages.update(admin_inactive_packages)
inactive_packages.update(non_admin_inactive_packages)
# Handles Active Packages
if isinstance(cli_admin_show_install_active, list):
admin_active_packages = self.parse_packages_by_node(cli_admin_show_install_active[0], PackageState.ACTIVE)
if isinstance(cli_show_install_active, list):
non_admin_active_packages = self.parse_packages_by_node(cli_show_install_active[0], PackageState.ACTIVE)
active_packages.update(admin_active_packages)
active_packages.update(non_admin_active_packages)
# Handles Committed Packages
if isinstance(cli_admin_show_install_committed, list):
admin_committed_packages = self.parse_packages_by_node(cli_admin_show_install_committed[0],
PackageState.ACTIVE_COMMITTED)
if isinstance(cli_show_install_committed, list):
non_admin_committed_packages = self.parse_packages_by_node(cli_show_install_committed[0],
PackageState.ACTIVE_COMMITTED)
committed_packages.update(admin_committed_packages)
committed_packages.update(non_admin_committed_packages)
if committed_packages:
for package_name in active_packages:
# Extracts the Package object
active_package = active_packages.get(package_name)
committed_package = committed_packages.get(package_name)
if committed_package is not None:
# Peeks into the ModulePackageStates to see if the same line card
# with the same package appears in both active and committed areas.
for active_module_package_state in active_package.modules_package_state:
for committed_module_package_state in committed_package.modules_package_state:
if active_module_package_state.module_name == committed_module_package_state.module_name:
active_module_package_state.package_state = PackageState.ACTIVE_COMMITTED
active_package.state = PackageState.ACTIVE_COMMITTED
for package_name in inactive_packages:
# Extracts the Package object
inactive_package = inactive_packages.get(package_name)
committed_package = committed_packages.get(package_name)
if committed_package is not None:
# Peeks into the ModulePackageStates to see if the same line card
# with the same package appears in both inactive and committed areas.
for inactive_module_package_state in inactive_package.modules_package_state:
for committed_module_package_state in committed_package.modules_package_state:
if inactive_module_package_state.module_name == committed_module_package_state.module_name:
inactive_module_package_state.package_state = PackageState.INACTIVE_COMMITTED
inactive_package.state = PackageState.INACTIVE_COMMITTED
for package in active_packages.values():
host_packages.append(package)
for package in inactive_packages.values():
host_packages.append(package)
if len(host_packages) > 0:
ctx.host.packages = host_packages
return True
return False
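    # Summary of the state resolution above (descriptive comment, not new logic):
    # a package that appears for the same node in both the active and committed
    # outputs is promoted to ACTIVE_COMMITTED, one in both inactive and committed
    # becomes INACTIVE_COMMITTED, and everything else keeps the plain
    # ACTIVE / INACTIVE state taken from the corresponding CLI output.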
def parse_inactive(self, lines, package_state):
"""
NON-ADMIN:
RP/0/RP0/CPU0:Deploy#show install inactive
5 inactive package(s) found:
ncs6k-k9sec-5.2.5.47I
ncs6k-mpls-5.2.5.47I
ncs6k-5.2.5.47I.CSCuy47880-0.0.4.i
ncs6k-mgbl-5.2.5.47I
ncs6k-5.2.5.CSCuz65240-1.0.0
ADMIN: Inactive
sysadmin-vm:0_RP0:NCS-Deploy2# show install inactive
Wed Jun 8 23:03:38.637 UTC
Node 0/RP0 [RP]
Inactive Packages:
ncs6k-sysadmin-5.0.1.CSCun50237-1.0.0
ncs6k-sysadmin-5.2.3.CSCut94440-1.0.0
ncs6k-sysadmin-5.0.1.CSCum80946-1.0.0
ncs6k-sysadmin-5.0.1.CSCus71815-1.0.0
ncs6k-sysadmin-5.2.3.CSCut24295-1.0.0
ncs6k-sysadmin-5.0.1.CSCuq00795-1.0.0
Node 0/RP1 [RP]
Inactive Packages:
ncs6k-sysadmin-5.0.1.CSCun50237-1.0.0
ncs6k-sysadmin-5.2.3.CSCut94440-1.0.0
ncs6k-sysadmin-5.0.1.CSCum80946-1.0.0
ncs6k-sysadmin-5.0.1.CSCus71815-1.0.0
ncs6k-sysadmin-5.2.3.CSCut24295-1.0.0
ncs6k-sysadmin-5.0.1.CSCuq00795-1.0.0
"""
package_dict = {}
if lines:
lines = lines.splitlines()
for line in lines:
line = line.strip()
if len(line) == 0: continue
if re.match("(ncs.*|asr9k.*|xrv9k.*)", line):
package_dict[line] = Package(location=None, name=line, state=package_state)
return package_dict
def parse_packages_by_node(self, lines, package_state):
"""
Used to parse 'show install active/committed' CLI output.
Package
ModulePackageState
ModulePackageState
NON-ADMIN: Active
RP/0/RP0/CPU0:Deploy#show install active
Node 0/RP0/CPU0 [RP]
Boot Partition: xr_lv0
Active Packages: 8
ncs6k-xr-5.2.5 version=5.2.5 [Boot image]
ncs6k-mgbl-5.2.5
ncs6k-mcast-5.2.5
ncs6k-li-5.2.5
ncs6k-k9sec-5.2.5
ncs6k-doc-5.2.5
ncs6k-mpls-5.2.5
ncs6k-5.2.5.CSCux82987-1.0.0
Node 0/RP1/CPU0 [RP]
Boot Partition: xr_lv0
Active Packages: 8
ncs6k-xr-5.2.5 version=5.2.5 [Boot image]
ncs6k-mgbl-5.2.5
ncs6k-mcast-5.2.5
ncs6k-li-5.2.5
ncs6k-k9sec-5.2.5
ncs6k-doc-5.2.5
ncs6k-mpls-5.2.5
ncs6k-5.2.5.CSCux82987-1.0.0
ADMIN: Active
sysadmin-vm:0_RP0:NCS-Deploy2# show install active
Wed Jun 8 22:47:32.908 UTC
Node 0/RP0 [RP]
Active Packages: 2
ncs6k-sysadmin-5.2.5 version=5.2.5 [Boot image]
ncs6k-sysadmin-5.2.5.CSCuy44658-1.0.0
Node 0/RP1 [RP]
Active Packages: 2
ncs6k-sysadmin-5.2.5 version=5.2.5 [Boot image]
ncs6k-sysadmin-5.2.5.CSCuy44658-1.0.0
Node 0/2 [LC]
Active Packages: 2
ncs6k-sysadmin-5.2.5 version=5.2.5 [Boot image]
ncs6k-sysadmin-5.2.5.CSCuy44658-1.0.0
"""
package_dict = {}
if lines:
trunks = self.get_trunks(lines.splitlines())
if len(trunks) > 0:
# Collect all the packages
package_list = []
for module in trunks:
for package in trunks[module]:
if not package in package_list and re.match("(ncs.*|asr9k.*|iosxr.*|xrv9k.*)", package):
package_list.append(package)
for package_name in package_list:
package = Package(
name=package_name,
location=None,
state=package_state)
# Check which module has this package
for module in trunks:
for line in trunks[module]:
if line == package_name:
package.modules_package_state.append(ModulePackageState(
module_name=module,
package_state=package_state))
package_dict[package_name] = package
return package_dict
def get_trunks(self, lines):
"""
Return the CLI output as trunks. Each trunk is a section consisting of a module and its packages.
Below is an example of two trunks.
Node 0/RP0/CPU0 [RP]
Boot Partition: xr_lv36
Active Packages: 7
ncs6k-xr-5.2.1 version=5.2.1 [Boot image]
ncs6k-doc-5.2.1
ncs6k-k9sec-5.2.1
ncs6k-mcast-5.2.1
ncs6k-mgbl-5.2.1
ncs6k-mpls-5.2.1
ncs6k-5.2.1.CSCur01489-1.0.0
Node 0/RP1/CPU0 [RP]
Boot Partition: xr_lv36
Active Packages: 7
ncs6k-xr-5.2.1 version=5.2.1 [Boot image]
ncs6k-doc-5.2.1
ncs6k-k9sec-5.2.1
ncs6k-mcast-5.2.1
ncs6k-mgbl-5.2.1
ncs6k-mpls-5.2.1
ncs6k-5.2.1.CSCur01489-1.0.0
"""
trunks = {}
trunk = []
module = None
for line in lines:
line = line.strip()
if len(line) == 0: continue
m = re.match("(Node.*)", line)
if m:
if module is not None:
trunks[module] = trunk
trunk = []
# Node 0/RP0/CPU0 [RP] becomes 0/RP0/CPU0
module = line.split()[1]
# For admin, CPU0 is missing for the node
if 'CPU0' not in module:
module = '{}/CPU0'.format(module)
else:
if module is not None:
if re.match("(ncs.*|asr9k.*|xrv9k.*)", line):
# Handles lines like: ncs6k-xr-5.2.1 version=5.2.1 [Boot image]
trunk.append(line.split()[0])
else:
trunk.append(line)
if module is not None:
trunks[module] = trunk
return trunks
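    # Illustrative sketch (not part of the original parser; assumes the base class
    # needs no constructor arguments): feeding the sample "show install active"
    # output from the docstring above through get_trunks() yields one entry per
    # node, e.g.
    #   parser = EXRSoftwarePackageParser()
    #   trunks = parser.get_trunks(sample_output.splitlines())
    #   # => {'0/RP0/CPU0': ['ncs6k-xr-5.2.1', 'ncs6k-doc-5.2.1', ...],
    #   #     '0/RP1/CPU0': ['ncs6k-xr-5.2.1', ...]}
    # parse_packages_by_node() then inverts this mapping into one Package per
    # package name, each carrying a ModulePackageState per node.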
class EXRInventoryParser(BaseInventoryParser):
def process_inventory(self, ctx):
"""
For ASR9K-64, NCS6K and NCS5500
Chassis most likely shows up first in the
output of "admin show inventory".
Example for ASR9K-64:
Name: Rack 0 Descr: ASR-9904 AC Chassis
PID: ASR-9904-AC VID: V01 SN: FOX1746GHJ9
Example for NCS6K:
Name: Rack 0 Descr: NCS 6008 - 8-Slot Chassis
PID: NCS-6008 VID: V01 SN: FLM17476JWA
Example for NCS5500:
Name: Rack 0 Descr: NCS5500 8 Slot Single Chassis
PID: NCS-5508 VID: V01 SN: FGE194714QX
"""
if not ctx.load_data('cli_show_inventory'):
return
inventory_output = ctx.load_data('cli_show_inventory')[0]
inventory_data = self.parse_inventory_output(inventory_output)
chassis_indices = []
for idx in xrange(0, len(inventory_data)):
if self.REGEX_RACK.match(inventory_data[idx]['name']) and \
self.REGEX_CHASSIS.search(inventory_data[idx]['description']):
chassis_indices.append(idx)
if chassis_indices:
return self.store_inventory(ctx, inventory_data, chassis_indices)
logger = get_db_session_logger(ctx.db_session)
logger.exception('Failed to find chassis in inventory output for host {}.'.format(ctx.host.hostname))
return
class NCS1K5KIOSXRvInventoryParser(EXRInventoryParser):
def process_inventory(self, ctx):
"""
For NCS1K, NCS5K and IOSXRv9K
Chassis most likely shows up first in the
output of "admin show inventory".
Example for NCS1K:
Name: Rack 0 Descr: Network Convergence System 1000 Controller
PID: NCS1002 VID: V01 SN: CHANGE-ME-
Example for NCS5K:
Name: Rack 0 Descr:
PID: NCS-5002 VID: V01 SN: FOC1946R0DH
Example for IOSXRv:
NAME: "Rack 0", DESCR: "Cisco XRv9K Virtual Router"
PID: R-IOSXRV9000-CH , VID: V01, SN: DA55BD5FAC9
"""
if not ctx.load_data('cli_show_inventory'):
return
inventory_output = ctx.load_data('cli_show_inventory')[0]
inventory_data = self.parse_inventory_output(inventory_output)
chassis_indices = []
for idx in xrange(0, len(inventory_data)):
if self.REGEX_RACK.match(inventory_data[idx]['name']):
chassis_indices.append(idx)
if chassis_indices:
return self.store_inventory(ctx, inventory_data, chassis_indices)
logger = get_db_session_logger(ctx.db_session)
logger.exception('Failed to find chassis in inventory output for host {}.'.format(ctx.host.hostname))
return
| {
"content_hash": "46ecc7d0a0f6c9742711ee34d174bad4",
"timestamp": "",
"source": "github",
"line_count": 369,
"max_line_length": 119,
"avg_line_length": 39.6260162601626,
"alnum_prop": 0.5549856380796061,
"repo_name": "smjurcak/csm",
"id": "fd3049b2ff9e3d961eb90c30050723788a9b8e9d",
"size": "16113",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "csmserver/parsers/platforms/eXR.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "84140"
},
{
"name": "HTML",
"bytes": "618824"
},
{
"name": "JavaScript",
"bytes": "572667"
},
{
"name": "Python",
"bytes": "978958"
},
{
"name": "Shell",
"bytes": "3584"
}
],
"symlink_target": ""
} |
import numpy as np
import os
from pathlib import Path
import unittest
import ray
import ray.rllib.agents.marwil as marwil
from ray.rllib.evaluation.postprocessing import compute_advantages
from ray.rllib.offline import JsonReader
from ray.rllib.utils.framework import try_import_tf, try_import_torch
from ray.rllib.utils.test_utils import check, check_compute_single_action, \
framework_iterator
tf1, tf, tfv = try_import_tf()
torch, _ = try_import_torch()
class TestMARWIL(unittest.TestCase):
@classmethod
def setUpClass(cls):
ray.init(num_cpus=4)
@classmethod
def tearDownClass(cls):
ray.shutdown()
def test_marwil_compilation_and_learning_from_offline_file(self):
"""Test whether a MARWILTrainer can be built with all frameworks.
Learns from a historic-data file.
To generate this data, first run:
$ ./train.py --run=PPO --env=CartPole-v0 \
--stop='{"timesteps_total": 50000}' \
--config='{"output": "/tmp/out", "batch_mode": "complete_episodes"}'
"""
rllib_dir = Path(__file__).parent.parent.parent.parent
print("rllib dir={}".format(rllib_dir))
data_file = os.path.join(rllib_dir, "tests/data/cartpole/large.json")
print("data_file={} exists={}".format(data_file,
os.path.isfile(data_file)))
config = marwil.DEFAULT_CONFIG.copy()
config["num_workers"] = 2
config["evaluation_num_workers"] = 1
config["evaluation_interval"] = 3
config["evaluation_num_episodes"] = 5
config["evaluation_parallel_to_training"] = True
# Evaluate on actual environment.
config["evaluation_config"] = {"input": "sampler"}
# Learn from offline data.
config["input"] = [data_file]
num_iterations = 350
min_reward = 70.0
# Test for all frameworks.
for _ in framework_iterator(config, frameworks=("tf", "torch")):
trainer = marwil.MARWILTrainer(config=config, env="CartPole-v0")
learnt = False
for i in range(num_iterations):
eval_results = trainer.train().get("evaluation")
if eval_results:
print("iter={} R={} ".format(
i, eval_results["episode_reward_mean"]))
# Learn until some reward is reached on an actual live env.
if eval_results["episode_reward_mean"] > min_reward:
print("learnt!")
learnt = True
break
if not learnt:
raise ValueError(
"MARWILTrainer did not reach {} reward from expert "
"offline data!".format(min_reward))
check_compute_single_action(
trainer, include_prev_action_reward=True)
trainer.stop()
def test_marwil_cont_actions_from_offline_file(self):
"""Test whether MARWILTrainer runs with cont. actions.
Learns from a historic-data file.
To generate this data, first run:
$ ./train.py --run=PPO --env=Pendulum-v0 \
--stop='{"timesteps_total": 50000}' \
--config='{"output": "/tmp/out", "batch_mode": "complete_episodes"}'
"""
rllib_dir = Path(__file__).parent.parent.parent.parent
print("rllib dir={}".format(rllib_dir))
data_file = os.path.join(rllib_dir, "tests/data/pendulum/large.json")
print("data_file={} exists={}".format(data_file,
os.path.isfile(data_file)))
config = marwil.DEFAULT_CONFIG.copy()
config["num_workers"] = 1
config["evaluation_num_workers"] = 1
config["evaluation_interval"] = 3
config["evaluation_num_episodes"] = 5
config["evaluation_parallel_to_training"] = True
# Evaluate on actual environment.
config["evaluation_config"] = {"input": "sampler"}
# Learn from offline data.
config["input"] = [data_file]
config["input_evaluation"] = [] # disable (data has no action-probs)
num_iterations = 3
# Test for all frameworks.
for _ in framework_iterator(config, frameworks=("tf", "torch")):
trainer = marwil.MARWILTrainer(config=config, env="Pendulum-v0")
for i in range(num_iterations):
print(trainer.train())
trainer.stop()
def test_marwil_loss_function(self):
"""
To generate the historic data used in this test case, first run:
$ ./train.py --run=PPO --env=CartPole-v0 \
--stop='{"timesteps_total": 50000}' \
--config='{"output": "/tmp/out", "batch_mode": "complete_episodes"}'
"""
rllib_dir = Path(__file__).parent.parent.parent.parent
print("rllib dir={}".format(rllib_dir))
data_file = os.path.join(rllib_dir, "tests/data/cartpole/small.json")
print("data_file={} exists={}".format(data_file,
os.path.isfile(data_file)))
config = marwil.DEFAULT_CONFIG.copy()
config["num_workers"] = 0 # Run locally.
# Learn from offline data.
config["input"] = [data_file]
for fw, sess in framework_iterator(config, session=True):
reader = JsonReader(inputs=[data_file])
batch = reader.next()
trainer = marwil.MARWILTrainer(config=config, env="CartPole-v0")
policy = trainer.get_policy()
model = policy.model
# Calculate our own expected values (to then compare against the
# agent's loss output).
cummulative_rewards = compute_advantages(
batch, 0.0, config["gamma"], 1.0, False, False)["advantages"]
if fw == "torch":
cummulative_rewards = torch.tensor(cummulative_rewards)
if fw != "tf":
batch = policy._lazy_tensor_dict(batch)
model_out, _ = model.from_batch(batch)
vf_estimates = model.value_function()
if fw == "tf":
model_out, vf_estimates = \
policy.get_session().run([model_out, vf_estimates])
adv = cummulative_rewards - vf_estimates
if fw == "torch":
adv = adv.detach().cpu().numpy()
adv_squared = np.mean(np.square(adv))
c_2 = 100.0 + 1e-8 * (adv_squared - 100.0)
c = np.sqrt(c_2)
exp_advs = np.exp(config["beta"] * (adv / c))
dist = policy.dist_class(model_out, model)
logp = dist.logp(batch["actions"])
if fw == "torch":
logp = logp.detach().cpu().numpy()
elif fw == "tf":
logp = sess.run(logp)
# Calculate all expected loss components.
expected_vf_loss = 0.5 * adv_squared
expected_pol_loss = -1.0 * np.mean(exp_advs * logp)
expected_loss = \
expected_pol_loss + config["vf_coeff"] * expected_vf_loss
# Calculate the algorithm's loss (to check against our own
# calculation above).
batch.set_get_interceptor(None)
postprocessed_batch = policy.postprocess_trajectory(batch)
loss_func = marwil.marwil_tf_policy.marwil_loss if fw != "torch" \
else marwil.marwil_torch_policy.marwil_loss
if fw != "tf":
policy._lazy_tensor_dict(postprocessed_batch)
loss_out = loss_func(policy, model, policy.dist_class,
postprocessed_batch)
else:
loss_out, v_loss, p_loss = policy.get_session().run(
[policy._loss, policy.loss.v_loss, policy.loss.p_loss],
feed_dict=policy._get_loss_inputs_dict(
postprocessed_batch, shuffle=False))
# Check all components.
if fw == "torch":
check(policy.v_loss, expected_vf_loss, decimals=4)
check(policy.p_loss, expected_pol_loss, decimals=4)
elif fw == "tf":
check(v_loss, expected_vf_loss, decimals=4)
check(p_loss, expected_pol_loss, decimals=4)
else:
check(policy.loss.v_loss, expected_vf_loss, decimals=4)
check(policy.loss.p_loss, expected_pol_loss, decimals=4)
check(loss_out, expected_loss, decimals=3)
if __name__ == "__main__":
import pytest
import sys
sys.exit(pytest.main(["-v", __file__]))
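# Reference sketch of the loss terms verified above (comments only; "beta" and
# "vf_coeff" are the MARWIL config keys used in the test):
#   adv        = cumulative_rewards - vf_estimates
#   c          = sqrt(100 + 1e-8 * (mean(adv ** 2) - 100))
#   exp_advs   = exp(beta * adv / c)
#   vf_loss    = 0.5 * mean(adv ** 2)
#   pol_loss   = -mean(exp_advs * logp(actions))
#   total_loss = pol_loss + vf_coeff * vf_loss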
| {
"content_hash": "73d7adb56b6acc3b1539188de10c2ef2",
"timestamp": "",
"source": "github",
"line_count": 204,
"max_line_length": 79,
"avg_line_length": 42.48039215686274,
"alnum_prop": 0.5551580890837757,
"repo_name": "pcmoritz/ray-1",
"id": "29c6b678ecf2ce27cae113654acd2a8fdfdc851a",
"size": "8666",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "rllib/agents/marwil/tests/test_marwil.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "70670"
},
{
"name": "C++",
"bytes": "4670851"
},
{
"name": "CSS",
"bytes": "10912"
},
{
"name": "Dockerfile",
"bytes": "14159"
},
{
"name": "HTML",
"bytes": "30414"
},
{
"name": "Java",
"bytes": "1338604"
},
{
"name": "JavaScript",
"bytes": "914"
},
{
"name": "Jupyter Notebook",
"bytes": "1615"
},
{
"name": "Makefile",
"bytes": "234"
},
{
"name": "Python",
"bytes": "10523389"
},
{
"name": "Shell",
"bytes": "117557"
},
{
"name": "Smarty",
"bytes": "239"
},
{
"name": "Starlark",
"bytes": "238506"
},
{
"name": "TypeScript",
"bytes": "259269"
}
],
"symlink_target": ""
} |
__author__ = 'mr.S'
from bottle import request, FileUpload
from PIL import Image
import os
def is_ajax():
return True if request.headers.get('X-Requested-With') == 'XMLHttpRequest' else False
def image_thumbnail(image, width, height, position=('center', 'center'), fill='contain'):
try:
if type(image) is FileUpload:
image.file.seek(0)
image = image.file
image = Image.open(image)
owidth = image.size[0]
oheight = image.size[1]
wr, hr = 1.0*width/owidth, 1.0*height/oheight
size = owidth, oheight
x, y = position
# back = Image.new('RGBA', (width, height), (125, 125, 125, 0))
if fill == 'cover':
if wr < hr:
size = owidth*height/oheight, height
else:
size = width, oheight*width/owidth
else:
if wr > hr:
size = owidth*height/oheight, height
else:
size = width, oheight*width/owidth
if x == 'center':
x = (size[0] - width) / 2
elif x == 'right':
x = size[0] - width
else:
x = 0
if y == 'center':
y = (size[1] - height) / 2
elif y == 'bottom':
y = size[1] - height
else:
y = 0
image = image.resize(size, Image.ANTIALIAS)
image = image.crop((x, y, x+width, y+height))
return image
except IOError, e:
print e.errno
print e
print "Can not resize image "
def image_resize(image, width=None, height=None, max_width=None, max_height=None):
try:
if type(image) is FileUpload:
image.file.seek(0)
image = image.file
image = Image.open(image)
owidth = image.size[0]
oheight = image.size[1]
size = owidth, oheight
if not width and owidth > max_width: width = max_width
if not height and oheight > max_height: height = max_height
if width is not None and height is not None:
size = width, height
elif width is not None:
p = width/float(owidth)
size = width, int(oheight*p)
elif height is not None:
p = height/float(oheight)
size = int(owidth*p), height
image = image.resize(size, Image.ANTIALIAS)
print size
if image.mode == 'RGBA':
bg = Image.new(mode='RGBA', size=image.size, color=(255, 255, 255, 0))
bg.paste(image, image)
image = bg
return image
except IOError, e:
print e.errno
print e
print "Can not resize image "
def remove_similar(path, name):
if name is not None:
name = name.split('.')[0]+'.'
if os.path.exists(path):
for f in os.listdir(path):
if not os.path.isdir(f) and f.startswith(name):
os.remove(os.path.join(path, f))
| {
"content_hash": "da5f426598475fb57d56ad82fb839aaf",
"timestamp": "",
"source": "github",
"line_count": 95,
"max_line_length": 89,
"avg_line_length": 32.17894736842105,
"alnum_prop": 0.5086686293752044,
"repo_name": "s-tar/project_kate",
"id": "562a42ae0a2a52e003b90ebf360c2cd228e488b6",
"size": "3105",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "kernel/helpers.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "283826"
},
{
"name": "CoffeeScript",
"bytes": "11492"
},
{
"name": "HTML",
"bytes": "34688"
},
{
"name": "JavaScript",
"bytes": "111309"
},
{
"name": "Python",
"bytes": "110741"
}
],
"symlink_target": ""
} |
print "Test message one"
print "Test message two"
print "Test message three" | {
"content_hash": "8b6b0b238385e67d20a5b0fe137d49e8",
"timestamp": "",
"source": "github",
"line_count": 3,
"max_line_length": 26,
"avg_line_length": 25.333333333333332,
"alnum_prop": 0.7763157894736842,
"repo_name": "priyam3nidhi/pnSeattle",
"id": "8557503b1c600cb567016a8991d52b114177be32",
"size": "206",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "utf/tests/ut_utftests_multiline_simple.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "79723"
}
],
"symlink_target": ""
} |
import wx
import armid
import WidgetFactory
from Borg import Borg
class CharacteristicReferenceTypeDialog(wx.Dialog):
def __init__(self,parent,ciName,elName,currentValue):
wx.Dialog.__init__(self,parent,armid.CHARACTERISTICREFERENCETYPE_ID,'Edit Characteristic Reference Type',style=wx.DEFAULT_DIALOG_STYLE|wx.MAXIMIZE_BOX|wx.THICK_FRAME|wx.RESIZE_BORDER,size=(300,400))
self.theElementName = elName
self.theValue = currentValue
mainSizer = wx.BoxSizer(wx.VERTICAL)
mainSizer.Add(WidgetFactory.buildComboSizerList(self,'Characteristic Reference Type',(87,30),armid.CHARACTERISTICREFERENCETYPE_COMBOVALUE_ID,['grounds','warrant','rebuttal']),0,wx.EXPAND)
mainSizer.Add(WidgetFactory.buildTextSizer(self,'Characteristic Intention',(87,30),armid.CHARACTERISTICREFERENCETYPE_TEXTCHARINTENT_ID),0,wx.EXPAND)
ciCtrl = self.FindWindowById(armid.CHARACTERISTICREFERENCETYPE_TEXTCHARINTENT_ID)
ciCtrl.Disable()
mainSizer.Add(WidgetFactory.buildTextSizer(self,'Intention',(87,30),armid.CHARACTERISTICREFERENCETYPE_TEXTINTENTION_ID),0,wx.EXPAND)
mainSizer.Add(WidgetFactory.buildComboSizerList(self,'Intention Type',(87,30),armid.CHARACTERISTICREFERENCETYPE_COMBOINTTYPE_ID,['goal','softgoal']),0,wx.EXPAND)
mainSizer.Add(WidgetFactory.buildComboSizerList(self,'Means/End',(87,30),armid.CHARACTERISTICREFERENCETYPE_COMBOMEANSEND_ID,['means','end']),0,wx.EXPAND)
contType = ['Make','SomePositive','Help','Hurt','SomeNegative','Break']
mainSizer.Add(WidgetFactory.buildComboSizerList(self,'Contribution',(87,30),armid.CHARACTERISTICREFERENCETYPE_COMBOCONTRIBUTION_ID,contType),0,wx.EXPAND)
mainSizer.Add(wx.StaticText(self,-1,''),1,wx.EXPAND)
mainSizer.Add(WidgetFactory.buildCommitButtonSizer(self,armid.CHARACTERISTICREFERENCETYPE_BUTTONCOMMIT_ID,False),0,wx.ALIGN_CENTER)
self.SetSizer(mainSizer)
wx.EVT_BUTTON(self,armid.CHARACTERISTICREFERENCETYPE_BUTTONCOMMIT_ID,self.onCommit)
self.valueCtrl = self.FindWindowById(armid.CHARACTERISTICREFERENCETYPE_COMBOVALUE_ID)
self.valueCtrl.SetValue(currentValue)
self.ciCtrl = self.FindWindowById(armid.CHARACTERISTICREFERENCETYPE_TEXTCHARINTENT_ID)
self.ciCtrl.SetValue(ciName)
self.intCtrl = self.FindWindowById(armid.CHARACTERISTICREFERENCETYPE_TEXTINTENTION_ID)
self.intTypeCtrl = self.FindWindowById(armid.CHARACTERISTICREFERENCETYPE_COMBOINTTYPE_ID)
self.meCtrl = self.FindWindowById(armid.CHARACTERISTICREFERENCETYPE_COMBOMEANSEND_ID)
self.contCtrl = self.FindWindowById(armid.CHARACTERISTICREFERENCETYPE_COMBOCONTRIBUTION_ID)
b = Borg()
intDetails = b.dbProxy.impliedCharacteristicElementIntention(ciName,elName)
intName = intDetails[0]
intDim = intDetails[1]
meName = intDetails[2]
contName = intDetails[3]
if intName != '':
self.intCtrl.SetValue(intName)
self.intTypeCtrl.SetValue(intDim)
self.meCtrl.SetValue(meName)
self.contCtrl.SetValue(contName)
def onCommit(self,evt):
self.theValue = self.valueCtrl.GetValue()
ciName = self.ciCtrl.GetValue()
intName = self.intCtrl.GetValue()
intDim = self.intTypeCtrl.GetValue()
meName = self.meCtrl.GetValue()
contName = self.contCtrl.GetValue()
b = Borg()
b.dbProxy.updateImpliedCharacteristicElementIntention(ciName,self.theElementName,intName,intDim,meName,contName)
self.EndModal(armid.CHARACTERISTICREFERENCETYPE_BUTTONCOMMIT_ID)
def value(self): return self.theValue
| {
"content_hash": "c3dd798f0c1d608820f5a0d919ab9557",
"timestamp": "",
"source": "github",
"line_count": 67,
"max_line_length": 202,
"avg_line_length": 51.83582089552239,
"alnum_prop": 0.7771379211056724,
"repo_name": "RobinQuetin/CAIRIS-web",
"id": "90b42d9a31aad00bb114c830240e32cad2069af0",
"size": "4272",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "cairis/cairis/CharacteristicReferenceTypeDialog.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "11265"
},
{
"name": "Mako",
"bytes": "13226"
},
{
"name": "Python",
"bytes": "3313365"
},
{
"name": "Shell",
"bytes": "19461"
},
{
"name": "XSLT",
"bytes": "35522"
}
],
"symlink_target": ""
} |
import os
import shutil
import sys
import tempfile
import unittest
from array import array
from pyspark.testing.utils import ReusedPySparkTestCase, SPARK_HOME
class InputFormatTests(ReusedPySparkTestCase):
@classmethod
def setUpClass(cls):
ReusedPySparkTestCase.setUpClass()
cls.tempdir = tempfile.NamedTemporaryFile(delete=False)
os.unlink(cls.tempdir.name)
cls.sc._jvm.WriteInputFormatTestDataGenerator.generateData(cls.tempdir.name, cls.sc._jsc)
@classmethod
def tearDownClass(cls):
ReusedPySparkTestCase.tearDownClass()
shutil.rmtree(cls.tempdir.name)
@unittest.skipIf(sys.version >= "3", "serialize array of byte")
def test_sequencefiles(self):
basepath = self.tempdir.name
ints = sorted(self.sc.sequenceFile(basepath + "/sftestdata/sfint/",
"org.apache.hadoop.io.IntWritable",
"org.apache.hadoop.io.Text").collect())
ei = [(1, u'aa'), (1, u'aa'), (2, u'aa'), (2, u'bb'), (2, u'bb'), (3, u'cc')]
self.assertEqual(ints, ei)
doubles = sorted(self.sc.sequenceFile(basepath + "/sftestdata/sfdouble/",
"org.apache.hadoop.io.DoubleWritable",
"org.apache.hadoop.io.Text").collect())
ed = [(1.0, u'aa'), (1.0, u'aa'), (2.0, u'aa'), (2.0, u'bb'), (2.0, u'bb'), (3.0, u'cc')]
self.assertEqual(doubles, ed)
bytes = sorted(self.sc.sequenceFile(basepath + "/sftestdata/sfbytes/",
"org.apache.hadoop.io.IntWritable",
"org.apache.hadoop.io.BytesWritable").collect())
ebs = [(1, bytearray('aa', 'utf-8')),
(1, bytearray('aa', 'utf-8')),
(2, bytearray('aa', 'utf-8')),
(2, bytearray('bb', 'utf-8')),
(2, bytearray('bb', 'utf-8')),
(3, bytearray('cc', 'utf-8'))]
self.assertEqual(bytes, ebs)
text = sorted(self.sc.sequenceFile(basepath + "/sftestdata/sftext/",
"org.apache.hadoop.io.Text",
"org.apache.hadoop.io.Text").collect())
et = [(u'1', u'aa'),
(u'1', u'aa'),
(u'2', u'aa'),
(u'2', u'bb'),
(u'2', u'bb'),
(u'3', u'cc')]
self.assertEqual(text, et)
bools = sorted(self.sc.sequenceFile(basepath + "/sftestdata/sfbool/",
"org.apache.hadoop.io.IntWritable",
"org.apache.hadoop.io.BooleanWritable").collect())
eb = [(1, False), (1, True), (2, False), (2, False), (2, True), (3, True)]
self.assertEqual(bools, eb)
nulls = sorted(self.sc.sequenceFile(basepath + "/sftestdata/sfnull/",
"org.apache.hadoop.io.IntWritable",
"org.apache.hadoop.io.BooleanWritable").collect())
en = [(1, None), (1, None), (2, None), (2, None), (2, None), (3, None)]
self.assertEqual(nulls, en)
maps = self.sc.sequenceFile(basepath + "/sftestdata/sfmap/",
"org.apache.hadoop.io.IntWritable",
"org.apache.hadoop.io.MapWritable").collect()
em = [(1, {}),
(1, {3.0: u'bb'}),
(2, {1.0: u'aa'}),
(2, {1.0: u'cc'}),
(3, {2.0: u'dd'})]
for v in maps:
self.assertTrue(v in em)
# arrays get pickled to tuples by default
tuples = sorted(self.sc.sequenceFile(
basepath + "/sftestdata/sfarray/",
"org.apache.hadoop.io.IntWritable",
"org.apache.spark.api.python.DoubleArrayWritable").collect())
et = [(1, ()),
(2, (3.0, 4.0, 5.0)),
(3, (4.0, 5.0, 6.0))]
self.assertEqual(tuples, et)
# with custom converters, primitive arrays can stay as arrays
arrays = sorted(self.sc.sequenceFile(
basepath + "/sftestdata/sfarray/",
"org.apache.hadoop.io.IntWritable",
"org.apache.spark.api.python.DoubleArrayWritable",
valueConverter="org.apache.spark.api.python.WritableToDoubleArrayConverter").collect())
ea = [(1, array('d')),
(2, array('d', [3.0, 4.0, 5.0])),
(3, array('d', [4.0, 5.0, 6.0]))]
self.assertEqual(arrays, ea)
clazz = sorted(self.sc.sequenceFile(basepath + "/sftestdata/sfclass/",
"org.apache.hadoop.io.Text",
"org.apache.spark.api.python.TestWritable").collect())
cname = u'org.apache.spark.api.python.TestWritable'
ec = [(u'1', {u'__class__': cname, u'double': 1.0, u'int': 1, u'str': u'test1'}),
(u'2', {u'__class__': cname, u'double': 2.3, u'int': 2, u'str': u'test2'}),
(u'3', {u'__class__': cname, u'double': 3.1, u'int': 3, u'str': u'test3'}),
(u'4', {u'__class__': cname, u'double': 4.2, u'int': 4, u'str': u'test4'}),
(u'5', {u'__class__': cname, u'double': 5.5, u'int': 5, u'str': u'test56'})]
self.assertEqual(clazz, ec)
unbatched_clazz = sorted(self.sc.sequenceFile(basepath + "/sftestdata/sfclass/",
"org.apache.hadoop.io.Text",
"org.apache.spark.api.python.TestWritable",
).collect())
self.assertEqual(unbatched_clazz, ec)
def test_oldhadoop(self):
basepath = self.tempdir.name
ints = sorted(self.sc.hadoopFile(basepath + "/sftestdata/sfint/",
"org.apache.hadoop.mapred.SequenceFileInputFormat",
"org.apache.hadoop.io.IntWritable",
"org.apache.hadoop.io.Text").collect())
ei = [(1, u'aa'), (1, u'aa'), (2, u'aa'), (2, u'bb'), (2, u'bb'), (3, u'cc')]
self.assertEqual(ints, ei)
hellopath = os.path.join(SPARK_HOME, "python/test_support/hello/hello.txt")
oldconf = {"mapreduce.input.fileinputformat.inputdir": hellopath}
hello = self.sc.hadoopRDD("org.apache.hadoop.mapred.TextInputFormat",
"org.apache.hadoop.io.LongWritable",
"org.apache.hadoop.io.Text",
conf=oldconf).collect()
result = [(0, u'Hello World!')]
self.assertEqual(hello, result)
def test_newhadoop(self):
basepath = self.tempdir.name
ints = sorted(self.sc.newAPIHadoopFile(
basepath + "/sftestdata/sfint/",
"org.apache.hadoop.mapreduce.lib.input.SequenceFileInputFormat",
"org.apache.hadoop.io.IntWritable",
"org.apache.hadoop.io.Text").collect())
ei = [(1, u'aa'), (1, u'aa'), (2, u'aa'), (2, u'bb'), (2, u'bb'), (3, u'cc')]
self.assertEqual(ints, ei)
hellopath = os.path.join(SPARK_HOME, "python/test_support/hello/hello.txt")
newconf = {"mapreduce.input.fileinputformat.inputdir": hellopath}
hello = self.sc.newAPIHadoopRDD("org.apache.hadoop.mapreduce.lib.input.TextInputFormat",
"org.apache.hadoop.io.LongWritable",
"org.apache.hadoop.io.Text",
conf=newconf).collect()
result = [(0, u'Hello World!')]
self.assertEqual(hello, result)
def test_newolderror(self):
basepath = self.tempdir.name
self.assertRaises(Exception, lambda: self.sc.hadoopFile(
basepath + "/sftestdata/sfint/",
"org.apache.hadoop.mapreduce.lib.input.SequenceFileInputFormat",
"org.apache.hadoop.io.IntWritable",
"org.apache.hadoop.io.Text"))
self.assertRaises(Exception, lambda: self.sc.newAPIHadoopFile(
basepath + "/sftestdata/sfint/",
"org.apache.hadoop.mapred.SequenceFileInputFormat",
"org.apache.hadoop.io.IntWritable",
"org.apache.hadoop.io.Text"))
def test_bad_inputs(self):
basepath = self.tempdir.name
self.assertRaises(Exception, lambda: self.sc.sequenceFile(
basepath + "/sftestdata/sfint/",
"org.apache.hadoop.io.NotValidWritable",
"org.apache.hadoop.io.Text"))
self.assertRaises(Exception, lambda: self.sc.hadoopFile(
basepath + "/sftestdata/sfint/",
"org.apache.hadoop.mapred.NotValidInputFormat",
"org.apache.hadoop.io.IntWritable",
"org.apache.hadoop.io.Text"))
self.assertRaises(Exception, lambda: self.sc.newAPIHadoopFile(
basepath + "/sftestdata/sfint/",
"org.apache.hadoop.mapreduce.lib.input.NotValidInputFormat",
"org.apache.hadoop.io.IntWritable",
"org.apache.hadoop.io.Text"))
def test_converters(self):
# use of custom converters
basepath = self.tempdir.name
maps = sorted(self.sc.sequenceFile(
basepath + "/sftestdata/sfmap/",
"org.apache.hadoop.io.IntWritable",
"org.apache.hadoop.io.MapWritable",
keyConverter="org.apache.spark.api.python.TestInputKeyConverter",
valueConverter="org.apache.spark.api.python.TestInputValueConverter").collect())
em = [(u'\x01', []),
(u'\x01', [3.0]),
(u'\x02', [1.0]),
(u'\x02', [1.0]),
(u'\x03', [2.0])]
self.assertEqual(maps, em)
def test_binary_files(self):
path = os.path.join(self.tempdir.name, "binaryfiles")
os.mkdir(path)
data = b"short binary data"
with open(os.path.join(path, "part-0000"), 'wb') as f:
f.write(data)
[(p, d)] = self.sc.binaryFiles(path).collect()
self.assertTrue(p.endswith("part-0000"))
self.assertEqual(d, data)
def test_binary_records(self):
path = os.path.join(self.tempdir.name, "binaryrecords")
os.mkdir(path)
with open(os.path.join(path, "part-0000"), 'w') as f:
for i in range(100):
f.write('%04d' % i)
result = self.sc.binaryRecords(path, 4).map(int).collect()
self.assertEqual(list(range(100)), result)
class OutputFormatTests(ReusedPySparkTestCase):
def setUp(self):
self.tempdir = tempfile.NamedTemporaryFile(delete=False)
os.unlink(self.tempdir.name)
def tearDown(self):
shutil.rmtree(self.tempdir.name, ignore_errors=True)
@unittest.skipIf(sys.version >= "3", "serialize array of byte")
def test_sequencefiles(self):
basepath = self.tempdir.name
ei = [(1, u'aa'), (1, u'aa'), (2, u'aa'), (2, u'bb'), (2, u'bb'), (3, u'cc')]
self.sc.parallelize(ei).saveAsSequenceFile(basepath + "/sfint/")
ints = sorted(self.sc.sequenceFile(basepath + "/sfint/").collect())
self.assertEqual(ints, ei)
ed = [(1.0, u'aa'), (1.0, u'aa'), (2.0, u'aa'), (2.0, u'bb'), (2.0, u'bb'), (3.0, u'cc')]
self.sc.parallelize(ed).saveAsSequenceFile(basepath + "/sfdouble/")
doubles = sorted(self.sc.sequenceFile(basepath + "/sfdouble/").collect())
self.assertEqual(doubles, ed)
ebs = [(1, bytearray(b'\x00\x07spam\x08')), (2, bytearray(b'\x00\x07spam\x08'))]
self.sc.parallelize(ebs).saveAsSequenceFile(basepath + "/sfbytes/")
bytes = sorted(self.sc.sequenceFile(basepath + "/sfbytes/").collect())
self.assertEqual(bytes, ebs)
et = [(u'1', u'aa'),
(u'2', u'bb'),
(u'3', u'cc')]
self.sc.parallelize(et).saveAsSequenceFile(basepath + "/sftext/")
text = sorted(self.sc.sequenceFile(basepath + "/sftext/").collect())
self.assertEqual(text, et)
eb = [(1, False), (1, True), (2, False), (2, False), (2, True), (3, True)]
self.sc.parallelize(eb).saveAsSequenceFile(basepath + "/sfbool/")
bools = sorted(self.sc.sequenceFile(basepath + "/sfbool/").collect())
self.assertEqual(bools, eb)
en = [(1, None), (1, None), (2, None), (2, None), (2, None), (3, None)]
self.sc.parallelize(en).saveAsSequenceFile(basepath + "/sfnull/")
nulls = sorted(self.sc.sequenceFile(basepath + "/sfnull/").collect())
self.assertEqual(nulls, en)
em = [(1, {}),
(1, {3.0: u'bb'}),
(2, {1.0: u'aa'}),
(2, {1.0: u'cc'}),
(3, {2.0: u'dd'})]
self.sc.parallelize(em).saveAsSequenceFile(basepath + "/sfmap/")
maps = self.sc.sequenceFile(basepath + "/sfmap/").collect()
for v in maps:
            self.assertTrue(v in em)
def test_oldhadoop(self):
basepath = self.tempdir.name
dict_data = [(1, {}),
(1, {"row1": 1.0}),
(2, {"row2": 2.0})]
self.sc.parallelize(dict_data).saveAsHadoopFile(
basepath + "/oldhadoop/",
"org.apache.hadoop.mapred.SequenceFileOutputFormat",
"org.apache.hadoop.io.IntWritable",
"org.apache.hadoop.io.MapWritable")
result = self.sc.hadoopFile(
basepath + "/oldhadoop/",
"org.apache.hadoop.mapred.SequenceFileInputFormat",
"org.apache.hadoop.io.IntWritable",
"org.apache.hadoop.io.MapWritable").collect()
for v in result:
            self.assertTrue(v in dict_data)
conf = {
"mapred.output.format.class": "org.apache.hadoop.mapred.SequenceFileOutputFormat",
"mapreduce.job.output.key.class": "org.apache.hadoop.io.IntWritable",
"mapreduce.job.output.value.class": "org.apache.hadoop.io.MapWritable",
"mapreduce.output.fileoutputformat.outputdir": basepath + "/olddataset/"
}
self.sc.parallelize(dict_data).saveAsHadoopDataset(conf)
input_conf = {"mapreduce.input.fileinputformat.inputdir": basepath + "/olddataset/"}
result = self.sc.hadoopRDD(
"org.apache.hadoop.mapred.SequenceFileInputFormat",
"org.apache.hadoop.io.IntWritable",
"org.apache.hadoop.io.MapWritable",
conf=input_conf).collect()
for v in result:
            self.assertTrue(v in dict_data)
def test_newhadoop(self):
basepath = self.tempdir.name
data = [(1, ""),
(1, "a"),
(2, "bcdf")]
self.sc.parallelize(data).saveAsNewAPIHadoopFile(
basepath + "/newhadoop/",
"org.apache.hadoop.mapreduce.lib.output.SequenceFileOutputFormat",
"org.apache.hadoop.io.IntWritable",
"org.apache.hadoop.io.Text")
result = sorted(self.sc.newAPIHadoopFile(
basepath + "/newhadoop/",
"org.apache.hadoop.mapreduce.lib.input.SequenceFileInputFormat",
"org.apache.hadoop.io.IntWritable",
"org.apache.hadoop.io.Text").collect())
self.assertEqual(result, data)
conf = {
"mapreduce.job.outputformat.class":
"org.apache.hadoop.mapreduce.lib.output.SequenceFileOutputFormat",
"mapreduce.job.output.key.class": "org.apache.hadoop.io.IntWritable",
"mapreduce.job.output.value.class": "org.apache.hadoop.io.Text",
"mapreduce.output.fileoutputformat.outputdir": basepath + "/newdataset/"
}
self.sc.parallelize(data).saveAsNewAPIHadoopDataset(conf)
input_conf = {"mapreduce.input.fileinputformat.inputdir": basepath + "/newdataset/"}
new_dataset = sorted(self.sc.newAPIHadoopRDD(
"org.apache.hadoop.mapreduce.lib.input.SequenceFileInputFormat",
"org.apache.hadoop.io.IntWritable",
"org.apache.hadoop.io.Text",
conf=input_conf).collect())
self.assertEqual(new_dataset, data)
@unittest.skipIf(sys.version >= "3", "serialize of array")
def test_newhadoop_with_array(self):
basepath = self.tempdir.name
# use custom ArrayWritable types and converters to handle arrays
array_data = [(1, array('d')),
(1, array('d', [1.0, 2.0, 3.0])),
(2, array('d', [3.0, 4.0, 5.0]))]
self.sc.parallelize(array_data).saveAsNewAPIHadoopFile(
basepath + "/newhadoop/",
"org.apache.hadoop.mapreduce.lib.output.SequenceFileOutputFormat",
"org.apache.hadoop.io.IntWritable",
"org.apache.spark.api.python.DoubleArrayWritable",
valueConverter="org.apache.spark.api.python.DoubleArrayToWritableConverter")
result = sorted(self.sc.newAPIHadoopFile(
basepath + "/newhadoop/",
"org.apache.hadoop.mapreduce.lib.input.SequenceFileInputFormat",
"org.apache.hadoop.io.IntWritable",
"org.apache.spark.api.python.DoubleArrayWritable",
valueConverter="org.apache.spark.api.python.WritableToDoubleArrayConverter").collect())
self.assertEqual(result, array_data)
conf = {
"mapreduce.job.outputformat.class":
"org.apache.hadoop.mapreduce.lib.output.SequenceFileOutputFormat",
"mapreduce.job.output.key.class": "org.apache.hadoop.io.IntWritable",
"mapreduce.job.output.value.class": "org.apache.spark.api.python.DoubleArrayWritable",
"mapreduce.output.fileoutputformat.outputdir": basepath + "/newdataset/"
}
self.sc.parallelize(array_data).saveAsNewAPIHadoopDataset(
conf,
valueConverter="org.apache.spark.api.python.DoubleArrayToWritableConverter")
input_conf = {"mapreduce.input.fileinputformat.inputdir": basepath + "/newdataset/"}
new_dataset = sorted(self.sc.newAPIHadoopRDD(
"org.apache.hadoop.mapreduce.lib.input.SequenceFileInputFormat",
"org.apache.hadoop.io.IntWritable",
"org.apache.spark.api.python.DoubleArrayWritable",
valueConverter="org.apache.spark.api.python.WritableToDoubleArrayConverter",
conf=input_conf).collect())
self.assertEqual(new_dataset, array_data)
def test_newolderror(self):
basepath = self.tempdir.name
rdd = self.sc.parallelize(range(1, 4)).map(lambda x: (x, "a" * x))
self.assertRaises(Exception, lambda: rdd.saveAsHadoopFile(
basepath + "/newolderror/saveAsHadoopFile/",
"org.apache.hadoop.mapreduce.lib.output.SequenceFileOutputFormat"))
self.assertRaises(Exception, lambda: rdd.saveAsNewAPIHadoopFile(
basepath + "/newolderror/saveAsNewAPIHadoopFile/",
"org.apache.hadoop.mapred.SequenceFileOutputFormat"))
def test_bad_inputs(self):
basepath = self.tempdir.name
rdd = self.sc.parallelize(range(1, 4)).map(lambda x: (x, "a" * x))
self.assertRaises(Exception, lambda: rdd.saveAsHadoopFile(
basepath + "/badinputs/saveAsHadoopFile/",
"org.apache.hadoop.mapred.NotValidOutputFormat"))
self.assertRaises(Exception, lambda: rdd.saveAsNewAPIHadoopFile(
basepath + "/badinputs/saveAsNewAPIHadoopFile/",
"org.apache.hadoop.mapreduce.lib.output.NotValidOutputFormat"))
def test_converters(self):
# use of custom converters
basepath = self.tempdir.name
data = [(1, {3.0: u'bb'}),
(2, {1.0: u'aa'}),
(3, {2.0: u'dd'})]
self.sc.parallelize(data).saveAsNewAPIHadoopFile(
basepath + "/converters/",
"org.apache.hadoop.mapreduce.lib.output.SequenceFileOutputFormat",
keyConverter="org.apache.spark.api.python.TestOutputKeyConverter",
valueConverter="org.apache.spark.api.python.TestOutputValueConverter")
converted = sorted(self.sc.sequenceFile(basepath + "/converters/").collect())
expected = [(u'1', 3.0),
(u'2', 1.0),
(u'3', 2.0)]
self.assertEqual(converted, expected)
def test_reserialization(self):
basepath = self.tempdir.name
x = range(1, 5)
y = range(1001, 1005)
data = list(zip(x, y))
rdd = self.sc.parallelize(x).zip(self.sc.parallelize(y))
rdd.saveAsSequenceFile(basepath + "/reserialize/sequence")
result1 = sorted(self.sc.sequenceFile(basepath + "/reserialize/sequence").collect())
self.assertEqual(result1, data)
rdd.saveAsHadoopFile(
basepath + "/reserialize/hadoop",
"org.apache.hadoop.mapred.SequenceFileOutputFormat")
result2 = sorted(self.sc.sequenceFile(basepath + "/reserialize/hadoop").collect())
self.assertEqual(result2, data)
rdd.saveAsNewAPIHadoopFile(
basepath + "/reserialize/newhadoop",
"org.apache.hadoop.mapreduce.lib.output.SequenceFileOutputFormat")
result3 = sorted(self.sc.sequenceFile(basepath + "/reserialize/newhadoop").collect())
self.assertEqual(result3, data)
conf4 = {
"mapred.output.format.class": "org.apache.hadoop.mapred.SequenceFileOutputFormat",
"mapreduce.job.output.key.class": "org.apache.hadoop.io.IntWritable",
"mapreduce.job.output.value.class": "org.apache.hadoop.io.IntWritable",
"mapreduce.output.fileoutputformat.outputdir": basepath + "/reserialize/dataset"}
rdd.saveAsHadoopDataset(conf4)
result4 = sorted(self.sc.sequenceFile(basepath + "/reserialize/dataset").collect())
self.assertEqual(result4, data)
conf5 = {"mapreduce.job.outputformat.class":
"org.apache.hadoop.mapreduce.lib.output.SequenceFileOutputFormat",
"mapreduce.job.output.key.class": "org.apache.hadoop.io.IntWritable",
"mapreduce.job.output.value.class": "org.apache.hadoop.io.IntWritable",
"mapreduce.output.fileoutputformat.outputdir": basepath + "/reserialize/newdataset"
}
rdd.saveAsNewAPIHadoopDataset(conf5)
result5 = sorted(self.sc.sequenceFile(basepath + "/reserialize/newdataset").collect())
self.assertEqual(result5, data)
def test_malformed_RDD(self):
basepath = self.tempdir.name
# non-batch-serialized RDD[[(K, V)]] should be rejected
data = [[(1, "a")], [(2, "aa")], [(3, "aaa")]]
rdd = self.sc.parallelize(data, len(data))
self.assertRaises(Exception, lambda: rdd.saveAsSequenceFile(
basepath + "/malformed/sequence"))
if __name__ == "__main__":
from pyspark.tests.test_readwrite import *
try:
import xmlrunner
testRunner = xmlrunner.XMLTestRunner(output='target/test-reports', verbosity=2)
except ImportError:
testRunner = None
unittest.main(testRunner=testRunner, verbosity=2)
| {
"content_hash": "38aa4e7f1f10e30ee54f5fa0650cc09e",
"timestamp": "",
"source": "github",
"line_count": 483,
"max_line_length": 100,
"avg_line_length": 48.107660455486545,
"alnum_prop": 0.5710535376140472,
"repo_name": "actuaryzhang/spark",
"id": "734b7e4789f614d4ec4daa935168d8541d098428",
"size": "24020",
"binary": false,
"copies": "16",
"ref": "refs/heads/master",
"path": "python/pyspark/tests/test_readwrite.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "ANTLR",
"bytes": "46709"
},
{
"name": "Batchfile",
"bytes": "31352"
},
{
"name": "C",
"bytes": "1493"
},
{
"name": "CSS",
"bytes": "26884"
},
{
"name": "Dockerfile",
"bytes": "8823"
},
{
"name": "HTML",
"bytes": "70460"
},
{
"name": "HiveQL",
"bytes": "1823701"
},
{
"name": "Java",
"bytes": "4013751"
},
{
"name": "JavaScript",
"bytes": "201700"
},
{
"name": "Makefile",
"bytes": "9397"
},
{
"name": "PLpgSQL",
"bytes": "168476"
},
{
"name": "PowerShell",
"bytes": "3867"
},
{
"name": "Python",
"bytes": "2970168"
},
{
"name": "R",
"bytes": "1185910"
},
{
"name": "Roff",
"bytes": "15727"
},
{
"name": "SQLPL",
"bytes": "9325"
},
{
"name": "Scala",
"bytes": "29855180"
},
{
"name": "Shell",
"bytes": "201297"
},
{
"name": "TSQL",
"bytes": "427755"
},
{
"name": "Thrift",
"bytes": "67610"
},
{
"name": "q",
"bytes": "146878"
}
],
"symlink_target": ""
} |
from kivy.graphics import Color, Line
from kivy.properties import NumericProperty, ListProperty
from kivy.uix.widget import Widget
class SquareGrid(Widget):
cols = NumericProperty(16)
rows = NumericProperty(9)
cell = ListProperty([0, 0])
line_width = NumericProperty(1)
line_color = ListProperty([1, 1, 1, 1])
def __init__(self, **kw):
super(SquareGrid, self).__init__(**kw)
self.bind(cols=self.draw, rows=self.draw, pos=self.draw, size=self.draw)
    def on_touch_down(self, touch):
        if self.collide_point(*touch.pos):
            # Convert the touch position into (column, row) grid indices.
            col_w = self.width / float(self.cols)
            row_h = self.height / float(self.rows)
            self.cell = (int((touch.x - self.x) / col_w),
                         int((touch.y - self.y) / row_h))
            return True
        return super(SquareGrid, self).on_touch_down(touch)
    def draw(self, *args):
        w, h = self.size
        col_w = self.width / float(self.cols)
        row_h = self.height / float(self.rows)
        self.canvas.clear()
        with self.canvas:
            Color(*self.line_color)
            # Vertical grid lines, offset by the widget position.
            for col in range(self.cols + 1):
                Line(
                    points=[self.x + col * col_w, self.y,
                            self.x + col * col_w, self.y + h],
                    width=self.line_width)
            # Horizontal grid lines.
            for row in range(self.rows + 1):
                Line(
                    points=[self.x, self.y + row * row_h,
                            self.x + w, self.y + row * row_h],
                    width=self.line_width)
if __name__ == '__main__':
from kivy.base import runTouchApp
sg = SquareGrid(cols=8, rows=6)
runTouchApp(sg)
| {
"content_hash": "7bff9b02d3a7c2211a7ffaf429571fb9",
"timestamp": "",
"source": "github",
"line_count": 47,
"max_line_length": 80,
"avg_line_length": 32.93617021276596,
"alnum_prop": 0.5361757105943152,
"repo_name": "victor-rene/kivy-gamelib",
"id": "f36befd6669838a5aa8c5ec8f09ada8e2695f453",
"size": "1548",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "squaregrid/squaregrid.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "43746"
}
],
"symlink_target": ""
} |
from bisect import bisect_left
from couchdb import Server
from datetime import datetime
class Metric(dict):
    def __init__(self, timestamp, host="ALL", metrics=None):
        # Guard against the shared mutable default argument pitfall.
        if metrics is None:
            metrics = {}
        self.timestamp = timestamp
        self.host = host
        self.metrics = metrics
        super(Metric, self).__init__(metrics)
class CraftsCollection(dict):
def __init__(self, db, role):
self._db = db
self.role = role
super(CraftsCollection, self).__init__()
    def find_nearest(self, dt):
        # Bisect and index the same sorted key list.
        keys = sorted(self.keys())
        pos = bisect_left(keys, dt)
        return self[keys[pos]]
def get(self, view, start=datetime.min, end=datetime.max):
result = self._db.view(view,
startkey=[self.role, start.isoformat()],
endkey=[self.role, end.isoformat()])
self.update(dict([(datetime.strptime(
doc.key[1], '%Y-%m-%dT%H:%M:%S'),
doc.value) for doc in result]))
class AggregateCollection(CraftsCollection):
def get(self, start=datetime.min, end=datetime.max):
super(AggregateCollection, self).get('crafts/aggregates',
start, end)
class PredictionCollection(CraftsCollection):
def get(self, start=datetime.min, end=datetime.max):
super(PredictionCollection, self).get('crafts/predictions',
start, end)
def save(self):
docs = []
for time, predictions in self.items():
doc = {
'_id': '{}/{}/prediction'.format(self.role, time.isoformat()),
'role': self.role,
'timestamp': time.isoformat(),
'type': 'prediction',
'predictions': predictions
}
docs.append(doc)
self._db.update(docs)
def add(self, p):
if p.timestamp not in self:
self[p.timestamp] = {}
self[p.timestamp].update(p.metrics)
class MetricCollection(CraftsCollection):
    def get(self, start=datetime.min, end=datetime.max):
        super(MetricCollection, self).get('crafts/samples',
                                          start, end)
    def save(self):
docs = []
for time, hosts in self.items():
doc = {
'_id': '{}/{}/sample'.format(self.role, time.isoformat()),
'role': self.role,
'timestamp': time.isoformat(),
'type': 'sample',
'hosts': hosts
}
docs.append(doc)
self._db.update(docs)
def add(self, m):
if m.timestamp not in self:
self[m.timestamp] = {}
if m.host not in self[m.timestamp]:
self[m.timestamp][m.host] = {}
self[m.timestamp][m.host].update(m.metrics)
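# Illustrative usage sketch (not part of the original module).  The CouchDB URL,
# database name and role below are hypothetical; they only show how Metric,
# MetricCollection and couchdb.Server are intended to fit together.
if __name__ == '__main__':
    db = Server('http://localhost:5984/')['crafts']
    samples = MetricCollection(db, 'web')
    samples.add(Metric(datetime.utcnow(), host='web-01',
                       metrics={'cpu': 0.42, 'rps': 117.0}))
    samples.save()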
| {
"content_hash": "8a68243861f0c9f3812a31abea7c8e6d",
"timestamp": "",
"source": "github",
"line_count": 94,
"max_line_length": 78,
"avg_line_length": 30.22340425531915,
"alnum_prop": 0.5255191833861317,
"repo_name": "crafts/crafts-core",
"id": "ba463e687e9392b14909e3ba8568d8d67bf19e13",
"size": "2841",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "crafts/common/metrics.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CoffeeScript",
"bytes": "895"
},
{
"name": "Python",
"bytes": "26837"
},
{
"name": "Shell",
"bytes": "401"
}
],
"symlink_target": ""
} |
"""
Basic functionality to aid in the definition of in-memory test doubles.
"""
import os
from weakref import WeakKeyDictionary
import attr
import txaws.credentials
class ControllerState(object):
def __get__(self, oself, type):
return oself._controller.get_state(oself.creds)
@attr.s
class MemoryClient(object):
_state = ControllerState()
_controller = attr.ib()
creds = attr.ib()
@attr.s(frozen=True)
class MemoryService(object):
"""
L{MemoryService} is the entrypoint into an in-memory implementation of a
single AWS service.
@ivar client_factory: A callable which returns the client object for this
service. Its arguments are this object, some credentials, and any
extra arguments passed to ``client``.
@ivar state_factory: A no-argument callable which returns the
implementation state for a client object returned by
C{client_factory}.
"""
client_factory = attr.ib()
state_factory = attr.ib()
_state = attr.ib(
default=attr.Factory(dict),
init=False,
hash=False,
)
def get_state(self, creds):
"""
Get the state that belongs to a particular account.
@param creds: The credentials which identify a particular account.
@type creds: L{AWSCredentials}
@return: The state for the account, creating it if necessary. The
state will be whatever C{state_factory} returns.
"""
key = (creds.access_key, creds.secret_key)
return self._state.setdefault(key, self.state_factory())
def client(self, creds, *a, **kw):
"""
Get an in-memory verified fake client for this service.
@param creds: The credentials to associate with the account. No
authentication is performed but this identifies the state the
client will find.
@type creds: L{AWSCredentials}
@return: A new client for this service along with the state object for
the client.
@rtype: L{tuple}
"""
client = self.client_factory(self, creds, *a, **kw)
return client, self.get_state(creds)
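# Minimal sketch (not part of txaws): one way a fake service could be wired up
# with MemoryService.  ``_ExampleClient``, ``_ExampleState`` and the credential
# values in ``_example_usage`` are hypothetical placeholders.
@attr.s
class _ExampleState(object):
    things = attr.ib(default=attr.Factory(list))
@attr.s
class _ExampleClient(MemoryClient):
    def add_thing(self, thing):
        # MemoryClient._state resolves to this account's _ExampleState.
        self._state.things.append(thing)
def _example_usage():
    service = MemoryService(
        client_factory=_ExampleClient,
        state_factory=_ExampleState,
    )
    creds = txaws.credentials.AWSCredentials("access key", "secret key")
    client, state = service.client(creds)
    client.add_thing("thing")
    return state.things  # ["thing"]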
| {
"content_hash": "99b977d4edf333114cf2697508af8289",
"timestamp": "",
"source": "github",
"line_count": 77,
"max_line_length": 78,
"avg_line_length": 28.155844155844157,
"alnum_prop": 0.6452952029520295,
"repo_name": "oubiwann/txaws",
"id": "2f73b22aa1f0c6bd54c060c9e387d46a310d4f42",
"size": "2247",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "txaws/testing/base.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "3962"
},
{
"name": "Python",
"bytes": "606696"
},
{
"name": "Shell",
"bytes": "44"
}
],
"symlink_target": ""
} |
from google.cloud import aiplatform_v1beta1
async def sample_delete_specialist_pool():
# Create a client
client = aiplatform_v1beta1.SpecialistPoolServiceAsyncClient()
# Initialize request argument(s)
request = aiplatform_v1beta1.DeleteSpecialistPoolRequest(
name="name_value",
)
# Make the request
operation = client.delete_specialist_pool(request=request)
print("Waiting for operation to complete...")
response = await operation.result()
# Handle the response
print(response)
# [END aiplatform_v1beta1_generated_SpecialistPoolService_DeleteSpecialistPool_async]
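# Hypothetical driver (not part of the generated sample): the coroutine above is
# only defined here; running it would typically look like
#
#   import asyncio
#   asyncio.run(sample_delete_specialist_pool())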
| {
"content_hash": "0da11c65e437abf65003f8699dc3c729",
"timestamp": "",
"source": "github",
"line_count": 23,
"max_line_length": 85,
"avg_line_length": 27.130434782608695,
"alnum_prop": 0.7371794871794872,
"repo_name": "googleapis/python-aiplatform",
"id": "7d0343f8360721eb889a09f0cefc582b5c4b2f72",
"size": "2043",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "samples/generated_samples/aiplatform_v1beta1_generated_specialist_pool_service_delete_specialist_pool_async.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "2050"
},
{
"name": "Python",
"bytes": "23977004"
},
{
"name": "Shell",
"bytes": "30668"
}
],
"symlink_target": ""
} |
"""ThreadSafeSingleton provider async mode tests."""
import asyncio
from dependency_injector import providers
from pytest import mark
@mark.asyncio
async def test_async_mode():
instance = object()
async def create_instance():
return instance
provider = providers.ThreadSafeSingleton(create_instance)
instance1 = await provider()
instance2 = await provider()
assert instance1 is instance2
assert instance1 is instance
assert instance2 is instance
@mark.asyncio
async def test_concurrent_init():
async def create_instance():
return object()
provider = providers.ThreadSafeSingleton(create_instance)
future_instance1 = provider()
future_instance2 = provider()
instance1, instance2 = await asyncio.gather(future_instance1, future_instance2)
assert instance1 is instance2
| {
"content_hash": "b8a9b17834ccf319b2f25859b95902b4",
"timestamp": "",
"source": "github",
"line_count": 38,
"max_line_length": 83,
"avg_line_length": 22.44736842105263,
"alnum_prop": 0.7291910902696366,
"repo_name": "ets-labs/dependency_injector",
"id": "13654150e8c3ae915f54e777972e2db64282ace0",
"size": "853",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "tests/unit/providers/async/test_thread_safe_singleton_py36.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "171148"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('blog', '0011_auto_20161005_1253'),
]
operations = [
migrations.AddField(
model_name='entry',
name='author',
field=models.CharField(blank=True, max_length=30, null=True),
),
]
| {
"content_hash": "aaf9373512ac15f20d77adaee360a53e",
"timestamp": "",
"source": "github",
"line_count": 18,
"max_line_length": 73,
"avg_line_length": 21.944444444444443,
"alnum_prop": 0.5924050632911393,
"repo_name": "bane138/nonhumanuser",
"id": "4e1502a271b4f809f0c15a5e57b0e5989652e352",
"size": "467",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "blog/migrations/0012_entry_author.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "97404"
},
{
"name": "HTML",
"bytes": "40972"
},
{
"name": "JavaScript",
"bytes": "161253"
},
{
"name": "Python",
"bytes": "101304"
},
{
"name": "Shell",
"bytes": "248"
}
],
"symlink_target": ""
} |
from supriya.system.SupriyaObject import SupriyaObject
class OscCallback(SupriyaObject):
"""
An OSC callback.
::
>>> import supriya.osc
>>> callback = supriya.osc.OscCallback(
... address_pattern='/*',
... procedure=lambda x: print('GOT:', x),
... )
"""
### CLASS VARIABLES ###
__slots__ = ("_address_pattern", "_argument_template", "_is_one_shot", "_procedure")
### INITIALIZER ###
def __init__(
self,
address_pattern=None,
argument_template=None,
is_one_shot=False,
procedure=None,
):
self._address_pattern = address_pattern
if argument_template is not None:
argument_template = tuple(argument_template)
self._argument_template = argument_template
self._procedure = procedure
self._is_one_shot = bool(is_one_shot)
### SPECIAL METHODS ###
def __call__(self, message):
self._procedure(message)
### PUBLIC PROPERTIES ###
@property
def address_pattern(self):
"""
The address pattern of the callback.
::
>>> callback = supriya.osc.OscCallback(
... address_pattern='/*',
... procedure=lambda x: print('GOT:', x),
... )
>>> callback.address_pattern
'/*'
Returns string.
"""
return self._address_pattern
@property
def argument_template(self):
return self._argument_template
@property
def is_one_shot(self):
"""
Is true when the callback should be unregistered after being
called.
::
>>> callback = supriya.osc.OscCallback(
... address_pattern='/*',
... procedure=lambda x: print('GOT:', x),
... )
>>> callback.is_one_shot
False
Returns boolean.
"""
return self._is_one_shot
@property
def procedure(self):
"""
Gets the procedure to be called.
Returns callable.
"""
return self._procedure
| {
"content_hash": "f0581114f0256f9882052aeeb2d1ae85",
"timestamp": "",
"source": "github",
"line_count": 93,
"max_line_length": 88,
"avg_line_length": 23.182795698924732,
"alnum_prop": 0.5139146567717996,
"repo_name": "Pulgama/supriya",
"id": "f7e28cb379dbd8ef9f762083914892978af4a43f",
"size": "2156",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "supriya/osc/OscCallback.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "6712"
},
{
"name": "CSS",
"bytes": "446"
},
{
"name": "HTML",
"bytes": "1083"
},
{
"name": "JavaScript",
"bytes": "6163"
},
{
"name": "Makefile",
"bytes": "6775"
},
{
"name": "Python",
"bytes": "2790612"
},
{
"name": "Shell",
"bytes": "569"
}
],
"symlink_target": ""
} |
"""The tests the for Traccar device tracker platform."""
from unittest.mock import patch
import pytest
from homeassistant import data_entry_flow
from homeassistant.components import traccar, zone
from homeassistant.components.device_tracker import DOMAIN as DEVICE_TRACKER_DOMAIN
from homeassistant.components.traccar import DOMAIN, TRACKER_UPDATE
from homeassistant.config import async_process_ha_core_config
from homeassistant.const import (
HTTP_OK,
HTTP_UNPROCESSABLE_ENTITY,
STATE_HOME,
STATE_NOT_HOME,
)
from homeassistant.helpers.dispatcher import DATA_DISPATCHER
from homeassistant.setup import async_setup_component
HOME_LATITUDE = 37.239622
HOME_LONGITUDE = -115.815811
@pytest.fixture(autouse=True)
def mock_dev_track(mock_device_tracker_conf):
"""Mock device tracker config loading."""
@pytest.fixture(name="client")
async def traccar_client(loop, hass, aiohttp_client):
"""Mock client for Traccar (unauthenticated)."""
assert await async_setup_component(hass, "persistent_notification", {})
assert await async_setup_component(hass, DOMAIN, {DOMAIN: {}})
await hass.async_block_till_done()
with patch("homeassistant.components.device_tracker.legacy.update_config"):
return await aiohttp_client(hass.http.app)
@pytest.fixture(autouse=True)
async def setup_zones(loop, hass):
"""Set up Zone config in HA."""
assert await async_setup_component(
hass,
zone.DOMAIN,
{
"zone": {
"name": "Home",
"latitude": HOME_LATITUDE,
"longitude": HOME_LONGITUDE,
"radius": 100,
}
},
)
await hass.async_block_till_done()
@pytest.fixture(name="webhook_id")
async def webhook_id_fixture(hass, client):
"""Initialize the Traccar component and get the webhook_id."""
await async_process_ha_core_config(
hass,
{"external_url": "http://example.com"},
)
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": "user"}
)
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM, result
result = await hass.config_entries.flow.async_configure(result["flow_id"], {})
assert result["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY
await hass.async_block_till_done()
return result["result"].data["webhook_id"]
async def test_missing_data(hass, client, webhook_id):
"""Test missing data."""
url = f"/api/webhook/{webhook_id}"
data = {"lat": "1.0", "lon": "1.1", "id": "123"}
# No data
req = await client.post(url)
await hass.async_block_till_done()
assert req.status == HTTP_UNPROCESSABLE_ENTITY
# No latitude
copy = data.copy()
del copy["lat"]
req = await client.post(url, params=copy)
await hass.async_block_till_done()
assert req.status == HTTP_UNPROCESSABLE_ENTITY
# No device
copy = data.copy()
del copy["id"]
req = await client.post(url, params=copy)
await hass.async_block_till_done()
assert req.status == HTTP_UNPROCESSABLE_ENTITY
async def test_enter_and_exit(hass, client, webhook_id):
"""Test when there is a known zone."""
url = f"/api/webhook/{webhook_id}"
data = {"lat": str(HOME_LATITUDE), "lon": str(HOME_LONGITUDE), "id": "123"}
# Enter the Home
req = await client.post(url, params=data)
await hass.async_block_till_done()
assert req.status == HTTP_OK
state_name = hass.states.get(
"{}.{}".format(DEVICE_TRACKER_DOMAIN, data["id"])
).state
assert STATE_HOME == state_name
# Enter Home again
req = await client.post(url, params=data)
await hass.async_block_till_done()
assert req.status == HTTP_OK
state_name = hass.states.get(
"{}.{}".format(DEVICE_TRACKER_DOMAIN, data["id"])
).state
assert STATE_HOME == state_name
data["lon"] = 0
data["lat"] = 0
# Enter Somewhere else
req = await client.post(url, params=data)
await hass.async_block_till_done()
assert req.status == HTTP_OK
state_name = hass.states.get(
"{}.{}".format(DEVICE_TRACKER_DOMAIN, data["id"])
).state
assert STATE_NOT_HOME == state_name
dev_reg = await hass.helpers.device_registry.async_get_registry()
assert len(dev_reg.devices) == 1
ent_reg = await hass.helpers.entity_registry.async_get_registry()
assert len(ent_reg.entities) == 1
async def test_enter_with_attrs(hass, client, webhook_id):
"""Test when additional attributes are present."""
url = f"/api/webhook/{webhook_id}"
data = {
"timestamp": 123456789,
"lat": "1.0",
"lon": "1.1",
"id": "123",
"accuracy": "10.5",
"batt": 10,
"speed": 100,
"bearing": "105.32",
"altitude": 102,
}
req = await client.post(url, params=data)
await hass.async_block_till_done()
assert req.status == HTTP_OK
state = hass.states.get("{}.{}".format(DEVICE_TRACKER_DOMAIN, data["id"]))
assert state.state == STATE_NOT_HOME
assert state.attributes["gps_accuracy"] == 10.5
assert state.attributes["battery_level"] == 10.0
assert state.attributes["speed"] == 100.0
assert state.attributes["bearing"] == 105.32
assert state.attributes["altitude"] == 102.0
data = {
"lat": str(HOME_LATITUDE),
"lon": str(HOME_LONGITUDE),
"id": "123",
"accuracy": 123,
"batt": 23,
"speed": 23,
"bearing": 123,
"altitude": 123,
}
req = await client.post(url, params=data)
await hass.async_block_till_done()
assert req.status == HTTP_OK
state = hass.states.get("{}.{}".format(DEVICE_TRACKER_DOMAIN, data["id"]))
assert state.state == STATE_HOME
assert state.attributes["gps_accuracy"] == 123
assert state.attributes["battery_level"] == 23
assert state.attributes["speed"] == 23
assert state.attributes["bearing"] == 123
assert state.attributes["altitude"] == 123
async def test_two_devices(hass, client, webhook_id):
"""Test updating two different devices."""
url = f"/api/webhook/{webhook_id}"
data_device_1 = {"lat": "1.0", "lon": "1.1", "id": "device_1"}
# Exit Home
req = await client.post(url, params=data_device_1)
await hass.async_block_till_done()
assert req.status == HTTP_OK
state = hass.states.get("{}.{}".format(DEVICE_TRACKER_DOMAIN, data_device_1["id"]))
assert state.state == "not_home"
# Enter Home
data_device_2 = dict(data_device_1)
data_device_2["lat"] = str(HOME_LATITUDE)
data_device_2["lon"] = str(HOME_LONGITUDE)
data_device_2["id"] = "device_2"
req = await client.post(url, params=data_device_2)
await hass.async_block_till_done()
assert req.status == HTTP_OK
state = hass.states.get("{}.{}".format(DEVICE_TRACKER_DOMAIN, data_device_2["id"]))
assert state.state == "home"
state = hass.states.get("{}.{}".format(DEVICE_TRACKER_DOMAIN, data_device_1["id"]))
assert state.state == "not_home"
@pytest.mark.xfail(
reason="The device_tracker component does not support unloading yet."
)
async def test_load_unload_entry(hass, client, webhook_id):
"""Test that the appropriate dispatch signals are added and removed."""
url = f"/api/webhook/{webhook_id}"
data = {"lat": str(HOME_LATITUDE), "lon": str(HOME_LONGITUDE), "id": "123"}
# Enter the Home
req = await client.post(url, params=data)
await hass.async_block_till_done()
assert req.status == HTTP_OK
state_name = hass.states.get(
"{}.{}".format(DEVICE_TRACKER_DOMAIN, data["id"])
).state
assert STATE_HOME == state_name
assert len(hass.data[DATA_DISPATCHER][TRACKER_UPDATE]) == 1
entry = hass.config_entries.async_entries(DOMAIN)[0]
assert await traccar.async_unload_entry(hass, entry)
await hass.async_block_till_done()
assert not hass.data[DATA_DISPATCHER][TRACKER_UPDATE]
| {
"content_hash": "21c026b9235831bd82e9fa4fe91f1d8a",
"timestamp": "",
"source": "github",
"line_count": 246,
"max_line_length": 87,
"avg_line_length": 32.34552845528455,
"alnum_prop": 0.6404423777805706,
"repo_name": "turbokongen/home-assistant",
"id": "c7e031b2ca6f18865dc951e6618c1b58e2ee277d",
"size": "7957",
"binary": false,
"copies": "3",
"ref": "refs/heads/dev",
"path": "tests/components/traccar/test_init.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "1720"
},
{
"name": "Python",
"bytes": "30405146"
},
{
"name": "Shell",
"bytes": "4832"
}
],
"symlink_target": ""
} |
import sys, os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath('../sphinxext'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc',
'sphinx.ext.doctest',
'sphinx.ext.intersphinx',
'sphinx.ext.todo',
# 'sphinx.ext.mathjax', # One of mathjax, pngmath or imgmath
# 'sphinx.ext.imgmath'
'sphinx.ext.pngmath',
'sphinx.ext.viewcode',
'sphinx.ext.autosummary',
'sphinx.ext.inheritance_diagram',
'matplotlib.sphinxext.plot_directive',
'matplotlib.sphinxext.only_directives',
'IPython.sphinxext.ipython_console_highlighting',
'IPython.sphinxext.ipython_directive',
'github', # for GitHub links,
'numpydoc', # numpydoc or napoleon, but not both
# 'sphinx.ext.napoleon'
]
ipython_savefig_dir = '../build/html/_static'
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'statsmodels'
copyright = u'2009-2017, Josef Perktold, Skipper Seabold, Jonathan Taylor, statsmodels-developers'
autosummary_generate = True
autoclass_content = 'class'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
from statsmodels.version import short_version, full_version
release = short_version
# The full version, including dev tag.
version = full_version
# set inheritance_graph_attrs
# you need graphviz installed to use this
# see: http://sphinx.pocoo.org/ext/inheritance.html
# and graphviz dot documentation http://www.graphviz.org/content/attrs
#NOTE: giving the empty string to size allows graphviz to figure out
# the size
inheritance_graph_attrs = dict(size='""', ratio="compress", fontsize=14,
rankdir="LR")
#inheritance_node_attrs = dict(shape='ellipse', fontsize=14, height=0.75,
# color='dodgerblue1', style='filled')
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['*/autosummary/class.rst', '*/autosummary/glmfamilies.rst']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
add_function_parentheses = False
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#html_theme = 'default'
if 'htmlhelp' in sys.argv:
#html_theme = 'statsmodels_htmlhelp' #doesn't look nice yet
html_theme = 'default'
print('################# using statsmodels_htmlhelp ############')
else:
html_theme = 'statsmodels'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
html_theme_path = ['../themes']
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
html_logo = 'images/statsmodels_hybi_banner.png'
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
html_favicon = 'images/statsmodels_hybi_favico.ico'
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
html_sidebars = {'index' : ['indexsidebar.html','searchbox.html','sidelinks.html']}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'statsmodelsdoc'
# -- Options for LaTeX output --------------------------------------------------
# The paper size ('letter' or 'a4').
#latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
#latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'statsmodels.tex', u'statsmodels Documentation',
u'Josef Perktold, Skipper Seabold', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Additional stuff for the LaTeX preamble.
#latex_preamble = ''
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# imgmath options
imgmath_image_format = 'png'
imgmath_latex_preamble = r'\usepackage[active]{preview}'
imgmath_use_preview = True
# pngmath options
# http://sphinx-doc.org/ext/math.html#module-sphinx.ext.pngmath
pngmath_latex_preamble = r'\usepackage[active]{preview}'  # + other custom stuff for inline math, such as non-default math fonts etc.
pngmath_use_preview = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'statsmodels', u'statsmodels Documentation',
[u'Josef Perktold, Skipper Seabold, Jonathan Taylor'], 1)
]
# -- Options for Epub output ---------------------------------------------------
# Bibliographic Dublin Core info.
epub_title = u'statsmodels'
epub_author = u'Josef Perktold, Skipper Seabold'
epub_publisher = u'Josef Perktold, Skipper Seabold'
epub_copyright = u'2009-2017, Josef Perktold, Skipper Seabold, Jonathan Taylor, statsmodels-developers'
# The language of the text. It defaults to the language option
# or en if the language is not set.
#epub_language = ''
# The scheme of the identifier. Typical schemes are ISBN or URL.
#epub_scheme = ''
# The unique identifier of the text. This can be a ISBN number
# or the project homepage.
#epub_identifier = ''
# A unique identification for the text.
#epub_uid = ''
# HTML files that should be inserted before the pages created by sphinx.
# The format is a list of tuples containing the path and title.
#epub_pre_files = []
# HTML files shat should be inserted after the pages created by sphinx.
# The format is a list of tuples containing the path and title.
#epub_post_files = []
# A list of files that should not be packed into the epub file.
#epub_exclude_files = []
# The depth of the table of contents in toc.ncx.
#epub_tocdepth = 3
# Allow duplicate toc entries.
#epub_tocdup = True
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {
'numpy' : ('https://docs.scipy.org/doc/numpy/', None),
'python' : ('https://docs.python.org/3.2', None),
'pydagogue' : ('http://matthew-brett.github.io/pydagogue/', None),
'patsy' : ('http://patsy.readthedocs.io/en/latest/', None),
'pandas' : ('http://pandas.pydata.org/pandas-docs/stable/', None),
}
from os.path import dirname, abspath, join
plot_basedir = join(dirname(dirname(os.path.abspath(__file__))), 'source')
# ghissue config
github_project_url = "https://github.com/statsmodels/statsmodels"
# for the examples landing page
import json
example_context = json.load(open('examples/landing.json'))
html_context = {'examples': example_context }
# --------------- DOCTEST -------------------
doctest_global_setup = """
import statsmodels.api as sm
import statsmodels.formula.api as smf
import numpy as np
import scipy.stats as stats
import matplotlib.pyplot as plt
import pandas as pd
""" | {
"content_hash": "c9775693874ce73ce275095fb3512b0e",
"timestamp": "",
"source": "github",
"line_count": 324,
"max_line_length": 130,
"avg_line_length": 34.632716049382715,
"alnum_prop": 0.6942340254879245,
"repo_name": "bert9bert/statsmodels",
"id": "2b155e9d5deff952aa21cb86c07d095aa5f2da44",
"size": "11643",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "docs/source/conf.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "AGS Script",
"bytes": "457842"
},
{
"name": "Assembly",
"bytes": "10035"
},
{
"name": "Batchfile",
"bytes": "351"
},
{
"name": "C",
"bytes": "12088"
},
{
"name": "HTML",
"bytes": "148470"
},
{
"name": "Matlab",
"bytes": "2609"
},
{
"name": "Python",
"bytes": "9844784"
},
{
"name": "R",
"bytes": "55204"
},
{
"name": "Stata",
"bytes": "54989"
}
],
"symlink_target": ""
} |
"""OAuth 2.0 utilities for Django.
Utilities for using OAuth 2.0 in conjunction with
the Django datastore.
"""
__author__ = '[email protected] (Joe Gregorio)'
import oauth2client
import base64
import pickle
from django.db import models
from oauth2client.client import Storage as BaseStorage
class CredentialsField(models.Field):
__metaclass__ = models.SubfieldBase
def db_type(self, connection=None):
return 'VARCHAR'
def to_python(self, value):
if not value:
return None
if isinstance(value, oauth2client.client.Credentials):
return value
return pickle.loads(base64.b64decode(value))
def get_db_prep_value(self, value):
return base64.b64encode(pickle.dumps(value))
class FlowField(models.Field):
__metaclass__ = models.SubfieldBase
def db_type(self, connection=None):
return 'VARCHAR'
def to_python(self, value):
if value is None:
return None
if isinstance(value, oauth2client.client.Flow):
return value
return pickle.loads(base64.b64decode(value))
def get_db_prep_value(self, value):
return base64.b64encode(pickle.dumps(value))
class Storage(BaseStorage):
"""Store and retrieve a single credential to and from
the datastore.
This Storage helper presumes the Credentials
  have been stored as a CredentialsField
on a db model class.
"""
def __init__(self, model_class, key_name, key_value, property_name):
"""Constructor for Storage.
Args:
      model_class: db.Model, model class that holds the credentials
key_name: string, key name for the entity that has the credentials
key_value: string, key value for the entity that has the credentials
property_name: string, name of the property that is an CredentialsProperty
"""
self.model_class = model_class
self.key_name = key_name
self.key_value = key_value
self.property_name = property_name
def get(self):
"""Retrieve Credential from datastore.
Returns:
oauth2client.Credentials
"""
credential = None
query = {self.key_name: self.key_value}
entities = self.model_class.objects.filter(**query)
if len(entities) > 0:
credential = getattr(entities[0], self.property_name)
if credential and hasattr(credential, 'set_store'):
credential.set_store(self.put)
return credential
def put(self, credentials):
"""Write a Credentials to the datastore.
Args:
credentials: Credentials, the credentials to store.
"""
args = {self.key_name: self.key_value}
entity = self.model_class(**args)
setattr(entity, self.property_name, credentials)
entity.save()
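# Illustrative sketch (not part of this module): a hypothetical Django model
# pairing CredentialsField with Storage.  ``CredentialsModel`` and the field
# names are examples only.
#
#   from django.contrib.auth.models import User
#
#   class CredentialsModel(models.Model):
#       id = models.ForeignKey(User, primary_key=True)
#       credential = CredentialsField()
#
#   storage = Storage(CredentialsModel, 'id', request.user, 'credential')
#   credential = storage.get()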
| {
"content_hash": "e629d00c3f06e8775c3f7f2d57cc9a51",
"timestamp": "",
"source": "github",
"line_count": 100,
"max_line_length": 80,
"avg_line_length": 26.18,
"alnum_prop": 0.6963330786860199,
"repo_name": "jjinux/party-playlist-picker",
"id": "c818ea244f620c10096abe7377de648c1919e022",
"size": "3199",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "third-party/oauth2client/django_orm.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "3116"
},
{
"name": "HTML",
"bytes": "7914"
},
{
"name": "JavaScript",
"bytes": "11041"
},
{
"name": "Python",
"bytes": "51122"
}
],
"symlink_target": ""
} |
import os
from django.conf.global_settings import TEMPLATE_CONTEXT_PROCESSORS
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
ADMINS = (
('DnDtools', '[email protected]'),
)
MANAGERS = ADMINS
TIME_ZONE = 'Europe/Prague'
LANGUAGE_CODE = 'en-us'
MEDIA_URL = '/media/'
STATIC_URL = '/static/'
USE_I18N = False
USE_L10N = False
ADMIN_MEDIA_PREFIX = '/media/'
MIDDLEWARE_CLASSES = (
'dnd.mobile.middleware.MobileMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'pagination.middleware.PaginationMiddleware',
'debug_toolbar.middleware.DebugToolbarMiddleware',
'LoginRequiredMiddleware.LoginRequiredMiddleware',
)
TEMPLATE_CONTEXT_PROCESSORS += (
'dnd.context_processors.unread_news',
'dnd.context_processors.disable_social',
'dnd.context_processors.is_mobile',
'dnd.context_processors.is_admin',
'dnd.context_processors.menu_constants',
)
ROOT_URLCONF = 'dndproject.urls'
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.messages',
'django.contrib.staticfiles',
'dnd',
'south',
'debug_toolbar',
'django.contrib.sitemaps',
'LoginRequiredMiddleware',
)
SERVER_EMAIL = '[email protected]'
USE_TZ = False
# LOCAL PY
LOGIN_URL = '/login'
MEDIA_ROOT = os.path.join(BASE_DIR, 'static')
STATIC_DIR = os.path.join(BASE_DIR, 'static')
STATICFILES_DIRS = (STATIC_DIR, )
SITE_ID = 1
TEMPLATE_DIRS = (
os.path.join(BASE_DIR, 'templates'),
)
RECAPTCHA_PUBLIC = ''
WSGI_APPLICATION = 'dndproject.wsgi.application'
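# Hypothetical example (not part of this repo): deployment-specific values are
# expected to live in dndproject/local.py, imported below, e.g.
#
#   DEBUG = True
#   SECRET_KEY = '...'
#   DATABASES = {'default': {'ENGINE': 'django.db.backends.sqlite3',
#                            'NAME': 'dnd.sqlite3'}}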
from dndproject.local import * | {
"content_hash": "16d32cc00cb286aac78a1e204510893c",
"timestamp": "",
"source": "github",
"line_count": 76,
"max_line_length": 67,
"avg_line_length": 25.710526315789473,
"alnum_prop": 0.7195496417604913,
"repo_name": "FreezyExp/dndtools",
"id": "34dd6dbcfce18a21961470f1c9e0d506455a66dd",
"size": "1978",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "dndtools/dndproject/settings.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "53351"
},
{
"name": "HTML",
"bytes": "197174"
},
{
"name": "JavaScript",
"bytes": "1941"
},
{
"name": "Python",
"bytes": "392237"
}
],
"symlink_target": ""
} |
"""
Bootstrapper for test framework plugins.
The entire rationale for this system is to get the modules in plugin/
imported without importing all of the supporting library, so that we can
set up things for testing before coverage starts.
The rationale for all of plugin/ being *in* the supporting library in the
first place is so that the testing and plugin suite is available to other
libraries, mainly external SQLAlchemy and Alembic dialects, to make use
of the same test environment and standard suites available to
SQLAlchemy/Alembic themselves without the need to ship/install a separate
package outside of SQLAlchemy.
NOTE: copied/adapted from SQLAlchemy master for backwards compatibility;
this should be removable when Alembic targets SQLAlchemy 1.0.0.
"""
import os
import sys
bootstrap_file = locals()["bootstrap_file"]
to_bootstrap = locals()["to_bootstrap"]
def load_file_as_module(name):
path = os.path.join(os.path.dirname(bootstrap_file), "%s.py" % name)
if sys.version_info >= (3, 3):
from importlib import machinery
mod = machinery.SourceFileLoader(name, path).load_module()
else:
import imp
mod = imp.load_source(name, path)
return mod
if to_bootstrap == "pytest":
sys.modules["sqla_plugin_base"] = load_file_as_module("plugin_base")
sys.modules["sqla_pytestplugin"] = load_file_as_module("pytestplugin")
else:
raise Exception("unknown bootstrap: %s" % to_bootstrap) # noqa
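# Illustrative sketch (not part of this file): a project conftest.py typically
# seeds ``bootstrap_file`` and ``to_bootstrap`` and then exec()s this module;
# the path below is hypothetical.
#
#   bootstrap_file = os.path.join(
#       os.path.dirname(__file__),
#       "lib", "sqlalchemy", "testing", "plugin", "bootstrap.py")
#   with open(bootstrap_file) as f:
#       code = compile(f.read(), "bootstrap.py", "exec")
#       to_bootstrap = "pytest"
#       exec(code, globals(), locals())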
| {
"content_hash": "bcd29916a795ed00021f9b7cf836f6fb",
"timestamp": "",
"source": "github",
"line_count": 45,
"max_line_length": 74,
"avg_line_length": 32.62222222222222,
"alnum_prop": 0.7377384196185286,
"repo_name": "kawamon/hue",
"id": "a95c947e2002842d88385e2482d17e805829e64a",
"size": "1468",
"binary": false,
"copies": "6",
"ref": "refs/heads/master",
"path": "desktop/core/ext-py/SQLAlchemy-1.3.17/lib/sqlalchemy/testing/plugin/bootstrap.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "ABAP",
"bytes": "962"
},
{
"name": "ActionScript",
"bytes": "1133"
},
{
"name": "Ada",
"bytes": "99"
},
{
"name": "Assembly",
"bytes": "5786"
},
{
"name": "AutoHotkey",
"bytes": "720"
},
{
"name": "Batchfile",
"bytes": "118907"
},
{
"name": "C",
"bytes": "3196521"
},
{
"name": "C#",
"bytes": "83"
},
{
"name": "C++",
"bytes": "308860"
},
{
"name": "COBOL",
"bytes": "4"
},
{
"name": "CSS",
"bytes": "1050129"
},
{
"name": "Cirru",
"bytes": "520"
},
{
"name": "Clojure",
"bytes": "794"
},
{
"name": "CoffeeScript",
"bytes": "403"
},
{
"name": "ColdFusion",
"bytes": "86"
},
{
"name": "Common Lisp",
"bytes": "632"
},
{
"name": "D",
"bytes": "324"
},
{
"name": "Dart",
"bytes": "489"
},
{
"name": "Dockerfile",
"bytes": "10981"
},
{
"name": "Eiffel",
"bytes": "375"
},
{
"name": "Elixir",
"bytes": "692"
},
{
"name": "Elm",
"bytes": "487"
},
{
"name": "Emacs Lisp",
"bytes": "411907"
},
{
"name": "Erlang",
"bytes": "487"
},
{
"name": "Forth",
"bytes": "979"
},
{
"name": "FreeMarker",
"bytes": "1017"
},
{
"name": "G-code",
"bytes": "521"
},
{
"name": "GLSL",
"bytes": "512"
},
{
"name": "Genshi",
"bytes": "946"
},
{
"name": "Gherkin",
"bytes": "699"
},
{
"name": "Go",
"bytes": "7312"
},
{
"name": "Groovy",
"bytes": "1080"
},
{
"name": "HTML",
"bytes": "24999718"
},
{
"name": "Haskell",
"bytes": "512"
},
{
"name": "Haxe",
"bytes": "447"
},
{
"name": "HiveQL",
"bytes": "43"
},
{
"name": "Io",
"bytes": "140"
},
{
"name": "JSONiq",
"bytes": "4"
},
{
"name": "Java",
"bytes": "471854"
},
{
"name": "JavaScript",
"bytes": "28075556"
},
{
"name": "Julia",
"bytes": "210"
},
{
"name": "Jupyter Notebook",
"bytes": "73168"
},
{
"name": "LSL",
"bytes": "2080"
},
{
"name": "Lean",
"bytes": "213"
},
{
"name": "Lex",
"bytes": "264449"
},
{
"name": "Liquid",
"bytes": "1883"
},
{
"name": "LiveScript",
"bytes": "5747"
},
{
"name": "Lua",
"bytes": "78382"
},
{
"name": "M4",
"bytes": "1377"
},
{
"name": "MATLAB",
"bytes": "203"
},
{
"name": "Makefile",
"bytes": "269655"
},
{
"name": "Mako",
"bytes": "3614942"
},
{
"name": "Mask",
"bytes": "597"
},
{
"name": "Myghty",
"bytes": "936"
},
{
"name": "Nix",
"bytes": "2212"
},
{
"name": "OCaml",
"bytes": "539"
},
{
"name": "Objective-C",
"bytes": "2672"
},
{
"name": "OpenSCAD",
"bytes": "333"
},
{
"name": "PHP",
"bytes": "662"
},
{
"name": "PLSQL",
"bytes": "31565"
},
{
"name": "PLpgSQL",
"bytes": "6006"
},
{
"name": "Pascal",
"bytes": "1412"
},
{
"name": "Perl",
"bytes": "4327"
},
{
"name": "PigLatin",
"bytes": "371"
},
{
"name": "PowerShell",
"bytes": "3204"
},
{
"name": "Python",
"bytes": "76440000"
},
{
"name": "R",
"bytes": "2445"
},
{
"name": "Roff",
"bytes": "95764"
},
{
"name": "Ruby",
"bytes": "1098"
},
{
"name": "Rust",
"bytes": "495"
},
{
"name": "Scala",
"bytes": "1541"
},
{
"name": "Scheme",
"bytes": "559"
},
{
"name": "Shell",
"bytes": "190718"
},
{
"name": "Smarty",
"bytes": "130"
},
{
"name": "TSQL",
"bytes": "10013"
},
{
"name": "Tcl",
"bytes": "899"
},
{
"name": "TeX",
"bytes": "165743"
},
{
"name": "Thrift",
"bytes": "317058"
},
{
"name": "TypeScript",
"bytes": "1607"
},
{
"name": "VBA",
"bytes": "2884"
},
{
"name": "VBScript",
"bytes": "938"
},
{
"name": "VHDL",
"bytes": "830"
},
{
"name": "Vala",
"bytes": "485"
},
{
"name": "Verilog",
"bytes": "274"
},
{
"name": "Vim Snippet",
"bytes": "226931"
},
{
"name": "XQuery",
"bytes": "114"
},
{
"name": "XSLT",
"bytes": "521413"
},
{
"name": "Yacc",
"bytes": "2133855"
}
],
"symlink_target": ""
} |
import argparse
from six.moves import configparser, StringIO, input
import logging
import os
import platform
import sys
import yaml
import jenkins_jobs.version
from jenkins_jobs.builder import Builder
from jenkins_jobs.errors import JenkinsJobsException
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger()
DEFAULT_CONF = """
[job_builder]
keep_descriptions=False
ignore_cache=False
recursive=False
allow_duplicates=False
[jenkins]
url=http://localhost:8080/
send-as=Jenkins
user=
password=
[hipchat]
authtoken=dummy
"""
def confirm(question):
    answer = input('%s (Y/N): ' % question).upper().strip()
if not answer == 'Y':
sys.exit('Aborted')
def recurse_path(root):
basepath = os.path.realpath(root)
pathlist = [basepath]
for root, dirs, files in os.walk(basepath, topdown=True):
pathlist.extend([os.path.join(root, path) for path in dirs])
return pathlist
def create_parser():
parser = argparse.ArgumentParser()
recursive_parser = argparse.ArgumentParser(add_help=False)
recursive_parser.add_argument('-r', '--recursive', action='store_true',
dest='recursive', default=False,
help='look for yaml files recursively')
subparser = parser.add_subparsers(help='update, test or delete job',
dest='command')
# subparser: update
parser_update = subparser.add_parser('update', parents=[recursive_parser])
parser_update.add_argument('path', help='colon-separated list of paths to'
' YAML files or directories')
parser_update.add_argument('names', help='name(s) of job(s)', nargs='*')
parser_update.add_argument('--delete-old', help='delete obsolete jobs',
action='store_true',
dest='delete_old', default=False,)
# subparser: test
parser_test = subparser.add_parser('test', parents=[recursive_parser])
parser_test.add_argument('path', help='colon-separated list of paths to'
' YAML files or directories',
nargs='?', default=sys.stdin)
parser_test.add_argument('-p', dest='plugins_info_path', default=None,
help='path to plugin info YAML file')
parser_test.add_argument('-o', dest='output_dir', default=sys.stdout,
help='path to output XML')
parser_test.add_argument('name', help='name(s) of job(s)', nargs='*')
# subparser: delete
parser_delete = subparser.add_parser('delete')
parser_delete.add_argument('name', help='name of job', nargs='+')
parser_delete.add_argument('-p', '--path', default=None,
help='colon-separated list of paths to'
' YAML files or directories')
# subparser: delete-all
subparser.add_parser('delete-all',
help='delete *ALL* jobs from Jenkins server, '
'including those not managed by Jenkins Job '
'Builder.')
parser.add_argument('--conf', dest='conf', help='configuration file')
parser.add_argument('-l', '--log_level', dest='log_level', default='info',
help="log level (default: %(default)s)")
parser.add_argument(
'--ignore-cache', action='store_true',
dest='ignore_cache', default=False,
help='ignore the cache and update the jobs anyhow (that will only '
'flush the specified jobs cache)')
parser.add_argument(
'--flush-cache', action='store_true', dest='flush_cache',
default=False, help='flush all the cache entries before updating')
parser.add_argument('--version', dest='version', action='version',
version=version(),
help='show version')
return parser
def main(argv=None):
# We default argv to None and assign to sys.argv[1:] below because having
# an argument default value be a mutable type in Python is a gotcha. See
# http://bit.ly/1o18Vff
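    # A minimal illustration of that gotcha (hypothetical example, not used
    # anywhere in this module):
    #
    #     def append_to(item, bucket=[]):   # default list is created once
    #         bucket.append(item)
    #         return bucket
    #
    #     append_to(1)   # -> [1]
    #     append_to(2)   # -> [1, 2]  (the same list is reused across calls)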
if argv is None:
argv = sys.argv[1:]
parser = create_parser()
options = parser.parse_args(argv)
if not options.command:
parser.error("Must specify a 'command' to be performed")
if (options.log_level is not None):
options.log_level = getattr(logging, options.log_level.upper(),
logger.getEffectiveLevel())
logger.setLevel(options.log_level)
config = setup_config_settings(options)
execute(options, config)
def setup_config_settings(options):
conf = '/etc/jenkins_jobs/jenkins_jobs.ini'
if options.conf:
conf = options.conf
else:
# Fallback to script directory
localconf = os.path.join(os.path.dirname(__file__),
'jenkins_jobs.ini')
if os.path.isfile(localconf):
conf = localconf
config = configparser.ConfigParser()
## Load default config always
config.readfp(StringIO(DEFAULT_CONF))
if os.path.isfile(conf):
logger.debug("Reading config from {0}".format(conf))
conffp = open(conf, 'r')
config.readfp(conffp)
elif options.command == 'test':
logger.debug("Not requiring config for test output generation")
else:
raise JenkinsJobsException(
"A valid configuration file is required when not run as a test"
"\n{0} is not a valid .ini file".format(conf))
return config
def execute(options, config):
logger.debug("Config: {0}".format(config))
# check the ignore_cache setting: first from command line,
# if not present check from ini file
ignore_cache = False
if options.ignore_cache:
ignore_cache = options.ignore_cache
elif config.has_option('jenkins', 'ignore_cache'):
logging.warn('ignore_cache option should be moved to the [job_builder]'
' section in the config file, the one specified in the '
'[jenkins] section will be ignored in the future')
ignore_cache = config.getboolean('jenkins', 'ignore_cache')
elif config.has_option('job_builder', 'ignore_cache'):
ignore_cache = config.getboolean('job_builder', 'ignore_cache')
# workaround for python 2.6 interpolation error
# https://bugs.launchpad.net/openstack-ci/+bug/1259631
try:
user = config.get('jenkins', 'user')
except (TypeError, configparser.NoOptionError):
user = None
try:
password = config.get('jenkins', 'password')
except (TypeError, configparser.NoOptionError):
password = None
plugins_info = None
if getattr(options, 'plugins_info_path', None) is not None:
with open(options.plugins_info_path, 'r') as yaml_file:
plugins_info = yaml.load(yaml_file)
if not isinstance(plugins_info, list):
raise JenkinsJobsException("{0} must contain a Yaml list!"
.format(options.plugins_info_path))
builder = Builder(config.get('jenkins', 'url'),
user,
password,
config,
ignore_cache=ignore_cache,
flush_cache=options.flush_cache,
plugins_list=plugins_info)
if getattr(options, 'path', None):
if options.path == sys.stdin:
logger.debug("Input file is stdin")
if options.path.isatty():
key = 'CTRL+Z' if platform.system() == 'Windows' else 'CTRL+D'
logger.warn(
"Reading configuration from STDIN. Press %s to end input.",
key)
        else:
            # take list of paths (splitting only applies to string paths,
            # not to a stdin file object)
            options.path = options.path.split(os.pathsep)
            do_recurse = (getattr(options, 'recursive', False) or
                          config.getboolean('job_builder', 'recursive'))
            paths = []
            for path in options.path:
                if do_recurse and os.path.isdir(path):
                    paths.extend(recurse_path(path))
                else:
                    paths.append(path)
            options.path = paths
if options.command == 'delete':
for job in options.name:
builder.delete_job(job, options.path)
elif options.command == 'delete-all':
confirm('Sure you want to delete *ALL* jobs from Jenkins server?\n'
'(including those not managed by Jenkins Job Builder)')
logger.info("Deleting all jobs")
builder.delete_all_jobs()
elif options.command == 'update':
logger.info("Updating jobs in {0} ({1})".format(
options.path, options.names))
jobs = builder.update_job(options.path, options.names)
if options.delete_old:
builder.delete_old_managed(keep=[x.name for x in jobs])
elif options.command == 'test':
builder.update_job(options.path, options.name,
output=options.output_dir)
def version():
return "Jenkins Job Builder version: %s" % \
jenkins_jobs.version.version_info.version_string()
if __name__ == '__main__':
sys.path.insert(0, '.')
main()
| {
"content_hash": "28bf747ac1ecb47a274cc04a7ac36866",
"timestamp": "",
"source": "github",
"line_count": 252,
"max_line_length": 79,
"avg_line_length": 36.91269841269841,
"alnum_prop": 0.5949258224037841,
"repo_name": "waynr/jenkins-job-builder",
"id": "498066d2b920559df2db59d6ca9febf28a20adcb",
"size": "9909",
"binary": false,
"copies": "1",
"ref": "refs/heads/experimental",
"path": "jenkins_jobs/cmd.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "ApacheConf",
"bytes": "680"
},
{
"name": "Python",
"bytes": "595324"
},
{
"name": "Shell",
"bytes": "869"
}
],
"symlink_target": ""
} |
from django.conf.urls import url, include, handler404, handler500
from django.contrib import admin
from django.http import HttpResponse, HttpResponseNotFound, HttpResponseServerError
admin.autodiscover()
handler404 = 'allaccess.tests.urls.test_404'
handler500 = 'allaccess.tests.urls.test_500'
def error(request):
return HttpResponse('Error')
def home(request):
return HttpResponse('Home')
def login(request):
return HttpResponse('Login')
def test_404(request):
return HttpResponseNotFound()
def test_500(request):
return HttpResponseServerError()
urlpatterns = [
url(r'^allaccess/', include('allaccess.urls')),
url(r'^allaccess/', include('allaccess.tests.custom.urls')),
url(r'^error/$', error),
url(r'^login/$', login),
url(r'^$', home),
]
| {
"content_hash": "2a8d27dc7c9092049091f312e394a7af",
"timestamp": "",
"source": "github",
"line_count": 38,
"max_line_length": 83,
"avg_line_length": 21,
"alnum_prop": 0.7130325814536341,
"repo_name": "dpoirier/django-all-access",
"id": "6cff8dd4405d42392b4e0d91fab3e1bcd426a174",
"size": "798",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "allaccess/tests/urls.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "HTML",
"bytes": "1898"
},
{
"name": "Python",
"bytes": "121919"
}
],
"symlink_target": ""
} |
from django.db import models
from django.test import TestCase
# use immediate_update on tests
from django.conf import settings
settings.SEARCH_BACKEND = 'search.backends.immediate_update'
from search import register
from search.core import SearchManager, startswith
# ExtraData is used for ForeignKey tests
class ExtraData(models.Model):
name = models.CharField(max_length=500)
description = models.CharField(max_length=500)
def __unicode__(self):
return self.name
class Indexed(models.Model):
extra_data = models.ForeignKey(ExtraData, related_name='indexed_model', null=True)
extra_data2 = models.ForeignKey(ExtraData, null=True)
# Test normal and prefix index
one = models.CharField(max_length=500, null=True)
two = models.CharField(max_length=500)
check = models.BooleanField()
value = models.CharField(max_length=500)
register(Indexed, 'one', search_index='one_index', indexer=startswith)
register(Indexed, ('one', 'two'), search_index='one_two_index')
register(Indexed, 'value', integrate=('one', 'check'), search_index='value_index')
# Test filters
class FiltersIndexed(models.Model):
value = models.CharField(max_length=500)
check = models.BooleanField()
register(FiltersIndexed, 'value', filters={'check':True, }, search_index='checked_index')
class TestIndexed(TestCase):
def setUp(self):
extra_data = ExtraData()
extra_data.save()
Indexed(one=u'foo', two='bar').save()
Indexed(one=u'foo_2', two='bar').save()
for i in range(3):
Indexed(extra_data=extra_data, one=u'OneOne%d' % i).save()
for i in range(3):
Indexed(extra_data=extra_data, one=u'one%d' % i, two='two%d' % i).save()
for i in range(3):
Indexed(extra_data=extra_data, one=(None, u'ÜÄÖ-+!#><|', 'blub')[i],
check=bool(i%2), value=u'value%d test-word' % i).save()
for i in range(3):
FiltersIndexed(check=bool(i%2), value=u'value%d test-word' % i).save()
def test_setup(self):
self.assertEqual(1, len(Indexed.one_two_index.search('foo bar')))
self.assertEqual(len(Indexed.one_index.search('oneo')), 3)
self.assertEqual(len(Indexed.one_index.search('one')), 6)
self.assertEqual(len(Indexed.one_two_index.search('one2')), 1)
self.assertEqual(len(Indexed.one_two_index.search('two')), 0)
self.assertEqual(len(Indexed.one_two_index.search('two1')), 1)
self.assertEqual(len(Indexed.value_index.search('word')), 3)
self.assertEqual(len(Indexed.value_index.search('test-word')), 3)
self.assertEqual(len(Indexed.value_index.search('value0').filter(
check=False)), 1)
self.assertEqual(len(Indexed.value_index.search('value1').filter(
check=True, one=u'ÜÄÖ-+!#><|')), 1)
self.assertEqual(len(Indexed.value_index.search('value2').filter(
check__exact=False, one='blub')), 1)
# test filters
self.assertEqual(len(FiltersIndexed.checked_index.search('test-word')), 1)
self.assertEqual(len(Indexed.value_index.search('foobar')), 0)
def test_change(self):
one = Indexed.one_index.search('oNeone1').get()
one.one = 'oneoneone'
one.save()
value = Indexed.value_index.search('value0').get()
value.value = 'value1 test-word'
value.save()
value.one = 'shidori'
value.value = 'value3 rasengan/shidori'
value.save()
self.assertEqual(len(Indexed.value_index.search('rasengan')), 1)
self.assertEqual(len(Indexed.value_index.search('value3')), 1)
value = Indexed.value_index.search('value3').get()
value.delete()
self.assertEqual(len(Indexed.value_index.search('value3')), 0)
| {
"content_hash": "bdda3857105282b99d3b9eaf78c5987e",
"timestamp": "",
"source": "github",
"line_count": 103,
"max_line_length": 89,
"avg_line_length": 36.99029126213592,
"alnum_prop": 0.6456692913385826,
"repo_name": "redebian/documentation",
"id": "58842f74aa83fdded6fb96496214e28642dffd83",
"size": "3850",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "search/tests.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "157873"
},
{
"name": "JavaScript",
"bytes": "238413"
},
{
"name": "Python",
"bytes": "4328419"
},
{
"name": "Shell",
"bytes": "5114"
}
],
"symlink_target": ""
} |
from msrest.paging import Paged
class ApplicationInsightsComponentPaged(Paged):
"""
    A paging container for iterating over a list of :class:`ApplicationInsightsComponent <azure.mgmt.applicationinsights.models.ApplicationInsightsComponent>` objects
"""
_attribute_map = {
'next_link': {'key': 'nextLink', 'type': 'str'},
'current_page': {'key': 'value', 'type': '[ApplicationInsightsComponent]'}
}
def __init__(self, *args, **kwargs):
super(ApplicationInsightsComponentPaged, self).__init__(*args, **kwargs)
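# Illustrative usage (the client/operation names are hypothetical -- only the
# lazy page-by-page iteration provided by msrest's Paged base class is assumed):
#
#     for component in client.components.list():
#         print(component.name)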
| {
"content_hash": "0e71c132e77703573d8a3e7e46e7c213",
"timestamp": "",
"source": "github",
"line_count": 16,
"max_line_length": 165,
"avg_line_length": 34.875,
"alnum_prop": 0.6738351254480287,
"repo_name": "AutorestCI/azure-sdk-for-python",
"id": "7283609bd08a014231122bff93fac8aff362a1e6",
"size": "1032",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "azure-mgmt-applicationinsights/azure/mgmt/applicationinsights/models/application_insights_component_paged.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "34619070"
}
],
"symlink_target": ""
} |
import warnings
from factory import base
from factory import declarations
from factory import enums
from factory import errors
from .compat import unittest
class TestObject(object):
def __init__(self, one=None, two=None, three=None, four=None):
self.one = one
self.two = two
self.three = three
self.four = four
class FakeDjangoModel(object):
@classmethod
def create(cls, **kwargs):
instance = cls(**kwargs)
instance.id = 1
return instance
def __init__(self, **kwargs):
for name, value in kwargs.items():
setattr(self, name, value)
self.id = None
class FakeModelFactory(base.Factory):
class Meta:
abstract = True
@classmethod
def _create(cls, model_class, *args, **kwargs):
return model_class.create(**kwargs)
class TestModel(FakeDjangoModel):
pass
class SafetyTestCase(unittest.TestCase):
def test_base_factory(self):
self.assertRaises(errors.FactoryError, base.BaseFactory)
class AbstractFactoryTestCase(unittest.TestCase):
def test_factory_for_optional(self):
"""Ensure that model= is optional for abstract=True."""
class TestObjectFactory(base.Factory):
class Meta:
abstract = True
self.assertTrue(TestObjectFactory._meta.abstract)
self.assertIsNone(TestObjectFactory._meta.model)
def test_factory_for_and_abstract_factory_optional(self):
"""Ensure that Meta.abstract is optional."""
class TestObjectFactory(base.Factory):
pass
self.assertTrue(TestObjectFactory._meta.abstract)
self.assertIsNone(TestObjectFactory._meta.model)
def test_abstract_factory_cannot_be_called(self):
class TestObjectFactory(base.Factory):
pass
self.assertRaises(errors.FactoryError, TestObjectFactory.build)
self.assertRaises(errors.FactoryError, TestObjectFactory.create)
def test_abstract_factory_not_inherited(self):
"""abstract=True isn't propagated to child classes."""
class TestObjectFactory(base.Factory):
class Meta:
abstract = True
model = TestObject
class TestObjectChildFactory(TestObjectFactory):
pass
self.assertFalse(TestObjectChildFactory._meta.abstract)
def test_abstract_or_model_is_required(self):
class TestObjectFactory(base.Factory):
class Meta:
abstract = False
model = None
self.assertRaises(errors.FactoryError, TestObjectFactory.build)
self.assertRaises(errors.FactoryError, TestObjectFactory.create)
class OptionsTests(unittest.TestCase):
def test_base_attrs(self):
class AbstractFactory(base.Factory):
pass
# Declarative attributes
self.assertTrue(AbstractFactory._meta.abstract)
self.assertIsNone(AbstractFactory._meta.model)
self.assertEqual((), AbstractFactory._meta.inline_args)
self.assertEqual((), AbstractFactory._meta.exclude)
self.assertEqual(enums.CREATE_STRATEGY, AbstractFactory._meta.strategy)
# Non-declarative attributes
self.assertEqual({}, AbstractFactory._meta.pre_declarations.as_dict())
self.assertEqual({}, AbstractFactory._meta.post_declarations.as_dict())
self.assertEqual(AbstractFactory, AbstractFactory._meta.factory)
self.assertEqual(base.Factory, AbstractFactory._meta.base_factory)
self.assertEqual(AbstractFactory._meta, AbstractFactory._meta.counter_reference)
def test_declaration_collecting(self):
lazy = declarations.LazyFunction(int)
lazy2 = declarations.LazyAttribute(lambda _o: 1)
postgen = declarations.PostGenerationDeclaration()
class AbstractFactory(base.Factory):
x = 1
y = lazy
y2 = lazy2
z = postgen
# Declarations aren't removed
self.assertEqual(1, AbstractFactory.x)
self.assertEqual(lazy, AbstractFactory.y)
self.assertEqual(lazy2, AbstractFactory.y2)
self.assertEqual(postgen, AbstractFactory.z)
# And are available in class Meta
self.assertEqual(
{'x': 1, 'y': lazy, 'y2': lazy2},
AbstractFactory._meta.pre_declarations.as_dict(),
)
self.assertEqual(
{'z': postgen},
AbstractFactory._meta.post_declarations.as_dict(),
)
def test_inherited_declaration_collecting(self):
lazy = declarations.LazyFunction(int)
lazy2 = declarations.LazyAttribute(lambda _o: 2)
postgen = declarations.PostGenerationDeclaration()
postgen2 = declarations.PostGenerationDeclaration()
class AbstractFactory(base.Factory):
x = 1
y = lazy
z = postgen
class OtherFactory(AbstractFactory):
a = lazy2
b = postgen2
# Declarations aren't removed
self.assertEqual(lazy2, OtherFactory.a)
self.assertEqual(postgen2, OtherFactory.b)
self.assertEqual(1, OtherFactory.x)
self.assertEqual(lazy, OtherFactory.y)
self.assertEqual(postgen, OtherFactory.z)
# And are available in class Meta
self.assertEqual(
{'x': 1, 'y': lazy, 'a': lazy2},
OtherFactory._meta.pre_declarations.as_dict(),
)
self.assertEqual(
{'z': postgen, 'b': postgen2},
OtherFactory._meta.post_declarations.as_dict(),
)
def test_inherited_declaration_shadowing(self):
lazy = declarations.LazyFunction(int)
lazy2 = declarations.LazyAttribute(lambda _o: 2)
postgen = declarations.PostGenerationDeclaration()
postgen2 = declarations.PostGenerationDeclaration()
class AbstractFactory(base.Factory):
x = 1
y = lazy
z = postgen
class OtherFactory(AbstractFactory):
y = lazy2
z = postgen2
# Declarations aren't removed
self.assertEqual(1, OtherFactory.x)
self.assertEqual(lazy2, OtherFactory.y)
self.assertEqual(postgen2, OtherFactory.z)
# And are available in class Meta
self.assertEqual(
{'x': 1, 'y': lazy2},
OtherFactory._meta.pre_declarations.as_dict(),
)
self.assertEqual(
{'z': postgen2},
OtherFactory._meta.post_declarations.as_dict(),
)
class DeclarationParsingTests(unittest.TestCase):
def test_classmethod(self):
class TestObjectFactory(base.Factory):
class Meta:
model = TestObject
@classmethod
def some_classmethod(cls):
return cls.create()
self.assertTrue(hasattr(TestObjectFactory, 'some_classmethod'))
obj = TestObjectFactory.some_classmethod()
self.assertEqual(TestObject, obj.__class__)
class FactoryTestCase(unittest.TestCase):
def test_magic_happens(self):
"""Calling a FooFactory doesn't yield a FooFactory instance."""
class TestObjectFactory(base.Factory):
class Meta:
model = TestObject
self.assertEqual(TestObject, TestObjectFactory._meta.model)
obj = TestObjectFactory.build()
self.assertFalse(hasattr(obj, '_meta'))
def test_display(self):
class TestObjectFactory(base.Factory):
class Meta:
model = FakeDjangoModel
self.assertIn('TestObjectFactory', str(TestObjectFactory))
self.assertIn('FakeDjangoModel', str(TestObjectFactory))
def test_lazy_attribute_non_existent_param(self):
class TestObjectFactory(base.Factory):
class Meta:
model = TestObject
            one = declarations.LazyAttribute(lambda a: a.does_not_exist)
self.assertRaises(AttributeError, TestObjectFactory)
def test_inheritance_with_sequence(self):
"""Tests that sequence IDs are shared between parent and son."""
class TestObjectFactory(base.Factory):
class Meta:
model = TestObject
one = declarations.Sequence(lambda a: a)
class TestSubFactory(TestObjectFactory):
class Meta:
model = TestObject
pass
parent = TestObjectFactory.build()
sub = TestSubFactory.build()
alt_parent = TestObjectFactory.build()
alt_sub = TestSubFactory.build()
ones = set([x.one for x in (parent, alt_parent, sub, alt_sub)])
self.assertEqual(4, len(ones))
class FactorySequenceTestCase(unittest.TestCase):
def setUp(self):
super(FactorySequenceTestCase, self).setUp()
class TestObjectFactory(base.Factory):
class Meta:
model = TestObject
one = declarations.Sequence(lambda n: n)
self.TestObjectFactory = TestObjectFactory
def test_reset_sequence(self):
o1 = self.TestObjectFactory()
self.assertEqual(0, o1.one)
o2 = self.TestObjectFactory()
self.assertEqual(1, o2.one)
self.TestObjectFactory.reset_sequence()
o3 = self.TestObjectFactory()
self.assertEqual(0, o3.one)
def test_reset_sequence_with_value(self):
o1 = self.TestObjectFactory()
self.assertEqual(0, o1.one)
o2 = self.TestObjectFactory()
self.assertEqual(1, o2.one)
self.TestObjectFactory.reset_sequence(42)
o3 = self.TestObjectFactory()
self.assertEqual(42, o3.one)
def test_reset_sequence_subclass_fails(self):
"""Tests that the sequence of a 'slave' factory cannot be reseted."""
class SubTestObjectFactory(self.TestObjectFactory):
pass
self.assertRaises(ValueError, SubTestObjectFactory.reset_sequence)
def test_reset_sequence_subclass_force(self):
"""Tests that reset_sequence(force=True) works."""
class SubTestObjectFactory(self.TestObjectFactory):
pass
o1 = SubTestObjectFactory()
self.assertEqual(0, o1.one)
o2 = SubTestObjectFactory()
self.assertEqual(1, o2.one)
SubTestObjectFactory.reset_sequence(force=True)
o3 = SubTestObjectFactory()
self.assertEqual(0, o3.one)
# The master sequence counter has been reset
o4 = self.TestObjectFactory()
self.assertEqual(1, o4.one)
def test_reset_sequence_subclass_parent(self):
"""Tests that the sequence of a 'slave' factory cannot be reseted."""
class SubTestObjectFactory(self.TestObjectFactory):
pass
o1 = SubTestObjectFactory()
self.assertEqual(0, o1.one)
o2 = SubTestObjectFactory()
self.assertEqual(1, o2.one)
self.TestObjectFactory.reset_sequence()
o3 = SubTestObjectFactory()
self.assertEqual(0, o3.one)
o4 = self.TestObjectFactory()
self.assertEqual(1, o4.one)
class FactoryDefaultStrategyTestCase(unittest.TestCase):
def setUp(self):
self.default_strategy = base.Factory._meta.strategy
def tearDown(self):
base.Factory._meta.strategy = self.default_strategy
def test_build_strategy(self):
base.Factory._meta.strategy = enums.BUILD_STRATEGY
class TestModelFactory(base.Factory):
class Meta:
model = TestModel
one = 'one'
test_model = TestModelFactory()
self.assertEqual(test_model.one, 'one')
self.assertFalse(test_model.id)
def test_create_strategy(self):
# Default Meta.strategy
class TestModelFactory(FakeModelFactory):
class Meta:
model = TestModel
one = 'one'
test_model = TestModelFactory()
self.assertEqual(test_model.one, 'one')
self.assertTrue(test_model.id)
def test_stub_strategy(self):
base.Factory._meta.strategy = enums.STUB_STRATEGY
class TestModelFactory(base.Factory):
class Meta:
model = TestModel
one = 'one'
test_model = TestModelFactory()
self.assertEqual(test_model.one, 'one')
self.assertFalse(hasattr(test_model, 'id')) # We should have a plain old object
def test_unknown_strategy(self):
base.Factory._meta.strategy = 'unknown'
class TestModelFactory(base.Factory):
class Meta:
model = TestModel
one = 'one'
self.assertRaises(base.Factory.UnknownStrategy, TestModelFactory)
def test_stub_with_create_strategy(self):
class TestModelFactory(base.StubFactory):
class Meta:
model = TestModel
one = 'one'
TestModelFactory._meta.strategy = enums.CREATE_STRATEGY
self.assertRaises(base.StubFactory.UnsupportedStrategy, TestModelFactory)
def test_stub_with_build_strategy(self):
class TestModelFactory(base.StubFactory):
class Meta:
model = TestModel
one = 'one'
TestModelFactory._meta.strategy = enums.BUILD_STRATEGY
obj = TestModelFactory()
# For stubs, build() is an alias of stub().
self.assertFalse(isinstance(obj, TestModel))
def test_change_strategy(self):
@base.use_strategy(enums.CREATE_STRATEGY)
class TestModelFactory(base.StubFactory):
class Meta:
model = TestModel
one = 'one'
self.assertEqual(enums.CREATE_STRATEGY, TestModelFactory._meta.strategy)
class FactoryCreationTestCase(unittest.TestCase):
def test_factory_for(self):
class TestFactory(base.Factory):
class Meta:
model = TestObject
self.assertTrue(isinstance(TestFactory.build(), TestObject))
def test_stub(self):
class TestFactory(base.StubFactory):
pass
self.assertEqual(TestFactory._meta.strategy, enums.STUB_STRATEGY)
def test_inheritance_with_stub(self):
class TestObjectFactory(base.StubFactory):
class Meta:
model = TestObject
pass
class TestFactory(TestObjectFactory):
pass
self.assertEqual(TestFactory._meta.strategy, enums.STUB_STRATEGY)
def test_stub_and_subfactory(self):
class StubA(base.StubFactory):
class Meta:
model = TestObject
one = 'blah'
class StubB(base.StubFactory):
class Meta:
model = TestObject
stubbed = declarations.SubFactory(StubA, two='two')
b = StubB()
self.assertEqual('blah', b.stubbed.one)
self.assertEqual('two', b.stubbed.two)
def test_custom_creation(self):
class TestModelFactory(FakeModelFactory):
class Meta:
model = TestModel
@classmethod
def _generate(cls, create, attrs):
attrs['four'] = 4
return super(TestModelFactory, cls)._generate(create, attrs)
b = TestModelFactory.build(one=1)
self.assertEqual(1, b.one)
self.assertEqual(4, b.four)
self.assertEqual(None, b.id)
c = TestModelFactory(one=1)
self.assertEqual(1, c.one)
self.assertEqual(4, c.four)
self.assertEqual(1, c.id)
# Errors
def test_no_associated_class(self):
class Test(base.Factory):
pass
self.assertTrue(Test._meta.abstract)
class PostGenerationParsingTestCase(unittest.TestCase):
def test_extraction(self):
class TestObjectFactory(base.Factory):
class Meta:
model = TestObject
foo = declarations.PostGenerationDeclaration()
self.assertIn('foo', TestObjectFactory._meta.post_declarations.as_dict())
def test_classlevel_extraction(self):
class TestObjectFactory(base.Factory):
class Meta:
model = TestObject
foo = declarations.PostGenerationDeclaration()
foo__bar = 42
self.assertIn('foo', TestObjectFactory._meta.post_declarations.as_dict())
self.assertIn('foo__bar', TestObjectFactory._meta.post_declarations.as_dict())
if __name__ == '__main__': # pragma: no cover
unittest.main()
| {
"content_hash": "22d7a8935f3f47e95a20cf090a8b2763",
"timestamp": "",
"source": "github",
"line_count": 539,
"max_line_length": 88,
"avg_line_length": 30.508348794063078,
"alnum_prop": 0.6237533446849914,
"repo_name": "rrauenza/factory_boy",
"id": "5e5c27b47a1e6e19658c2c9a16abdc41a6cb1cf9",
"size": "16504",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "tests/test_base.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "2321"
},
{
"name": "Python",
"bytes": "309597"
}
],
"symlink_target": ""
} |
import os
import unittest
import random
from test import support
import _thread as thread
import time
import weakref
from test import lock_tests
NUMTASKS = 10
NUMTRIPS = 3
POLL_SLEEP = 0.010 # seconds = 10 ms
_print_mutex = thread.allocate_lock()
def verbose_print(arg):
"""Helper function for printing out debugging output."""
if support.verbose:
with _print_mutex:
print(arg)
class BasicThreadTest(unittest.TestCase):
def setUp(self):
self.done_mutex = thread.allocate_lock()
self.done_mutex.acquire()
self.running_mutex = thread.allocate_lock()
self.random_mutex = thread.allocate_lock()
self.created = 0
self.running = 0
self.next_ident = 0
key = support.threading_setup()
self.addCleanup(support.threading_cleanup, *key)
class ThreadRunningTests(BasicThreadTest):
def newtask(self):
with self.running_mutex:
self.next_ident += 1
verbose_print("creating task %s" % self.next_ident)
thread.start_new_thread(self.task, (self.next_ident,))
self.created += 1
self.running += 1
def task(self, ident):
with self.random_mutex:
delay = random.random() / 10000.0
verbose_print("task %s will run for %sus" % (ident, round(delay*1e6)))
time.sleep(delay)
verbose_print("task %s done" % ident)
with self.running_mutex:
self.running -= 1
if self.created == NUMTASKS and self.running == 0:
self.done_mutex.release()
def test_starting_threads(self):
with support.wait_threads_exit():
# Basic test for thread creation.
for i in range(NUMTASKS):
self.newtask()
verbose_print("waiting for tasks to complete...")
self.done_mutex.acquire()
verbose_print("all tasks done")
def test_stack_size(self):
# Various stack size tests.
self.assertEqual(thread.stack_size(), 0, "initial stack size is not 0")
thread.stack_size(0)
self.assertEqual(thread.stack_size(), 0, "stack_size not reset to default")
@unittest.skipIf(os.name not in ("nt", "posix"), 'test meant for nt and posix')
def test_nt_and_posix_stack_size(self):
try:
thread.stack_size(4096)
except ValueError:
verbose_print("caught expected ValueError setting "
"stack_size(4096)")
except thread.error:
self.skipTest("platform does not support changing thread stack "
"size")
fail_msg = "stack_size(%d) failed - should succeed"
for tss in (262144, 0x100000, 0):
thread.stack_size(tss)
self.assertEqual(thread.stack_size(), tss, fail_msg % tss)
verbose_print("successfully set stack_size(%d)" % tss)
for tss in (262144, 0x100000):
verbose_print("trying stack_size = (%d)" % tss)
self.next_ident = 0
self.created = 0
with support.wait_threads_exit():
for i in range(NUMTASKS):
self.newtask()
verbose_print("waiting for all tasks to complete")
self.done_mutex.acquire()
verbose_print("all tasks done")
thread.stack_size(0)
def test__count(self):
# Test the _count() function.
orig = thread._count()
mut = thread.allocate_lock()
mut.acquire()
started = []
def task():
started.append(None)
mut.acquire()
mut.release()
with support.wait_threads_exit():
thread.start_new_thread(task, ())
while not started:
time.sleep(POLL_SLEEP)
self.assertEqual(thread._count(), orig + 1)
# Allow the task to finish.
mut.release()
# The only reliable way to be sure that the thread ended from the
# interpreter's point of view is to wait for the function object to be
# destroyed.
done = []
wr = weakref.ref(task, lambda _: done.append(None))
del task
while not done:
time.sleep(POLL_SLEEP)
self.assertEqual(thread._count(), orig)
def test_unraisable_exception(self):
def task():
started.release()
raise ValueError("task failed")
started = thread.allocate_lock()
with support.catch_unraisable_exception() as cm:
with support.wait_threads_exit():
started.acquire()
thread.start_new_thread(task, ())
started.acquire()
self.assertEqual(str(cm.unraisable.exc_value), "task failed")
self.assertIs(cm.unraisable.object, task)
self.assertEqual(cm.unraisable.err_msg,
"Exception ignored in thread started by")
self.assertIsNotNone(cm.unraisable.exc_traceback)
class Barrier:
def __init__(self, num_threads):
self.num_threads = num_threads
self.waiting = 0
self.checkin_mutex = thread.allocate_lock()
self.checkout_mutex = thread.allocate_lock()
self.checkout_mutex.acquire()
def enter(self):
self.checkin_mutex.acquire()
self.waiting = self.waiting + 1
if self.waiting == self.num_threads:
self.waiting = self.num_threads - 1
self.checkout_mutex.release()
return
self.checkin_mutex.release()
self.checkout_mutex.acquire()
self.waiting = self.waiting - 1
if self.waiting == 0:
self.checkin_mutex.release()
return
self.checkout_mutex.release()
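# Rough usage sketch for the two-phase barrier above (the worker functions are
# hypothetical): every thread calls enter() once per trip, and none returns
# until all num_threads threads have arrived:
#
#     bar = Barrier(3)
#     def worker():
#         do_phase_one()   # hypothetical work
#         bar.enter()      # blocks until all 3 workers reach this point
#         do_phase_two()   # hypothetical work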
class BarrierTest(BasicThreadTest):
def test_barrier(self):
with support.wait_threads_exit():
self.bar = Barrier(NUMTASKS)
self.running = NUMTASKS
for i in range(NUMTASKS):
thread.start_new_thread(self.task2, (i,))
verbose_print("waiting for tasks to end")
self.done_mutex.acquire()
verbose_print("tasks done")
def task2(self, ident):
for i in range(NUMTRIPS):
if ident == 0:
# give it a good chance to enter the next
# barrier before the others are all out
# of the current one
delay = 0
else:
with self.random_mutex:
delay = random.random() / 10000.0
verbose_print("task %s will run for %sus" %
(ident, round(delay * 1e6)))
time.sleep(delay)
verbose_print("task %s entering %s" % (ident, i))
self.bar.enter()
verbose_print("task %s leaving barrier" % ident)
with self.running_mutex:
self.running -= 1
# Must release mutex before releasing done, else the main thread can
# exit and set mutex to None as part of global teardown; then
# mutex.release() raises AttributeError.
finished = self.running == 0
if finished:
self.done_mutex.release()
class LockTests(lock_tests.LockTests):
locktype = thread.allocate_lock
class TestForkInThread(unittest.TestCase):
def setUp(self):
self.read_fd, self.write_fd = os.pipe()
@unittest.skipUnless(hasattr(os, 'fork'), 'need os.fork')
@support.reap_threads
def test_forkinthread(self):
status = "not set"
def thread1():
nonlocal status
# fork in a thread
pid = os.fork()
if pid == 0:
# child
try:
os.close(self.read_fd)
os.write(self.write_fd, b"OK")
finally:
os._exit(0)
else:
# parent
os.close(self.write_fd)
pid, status = os.waitpid(pid, 0)
with support.wait_threads_exit():
thread.start_new_thread(thread1, ())
self.assertEqual(os.read(self.read_fd, 2), b"OK",
"Unable to fork() in thread")
self.assertEqual(status, 0)
def tearDown(self):
try:
os.close(self.read_fd)
except OSError:
pass
try:
os.close(self.write_fd)
except OSError:
pass
if __name__ == "__main__":
unittest.main()
| {
"content_hash": "fb0b872251a892d891f4ad5c138d28c4",
"timestamp": "",
"source": "github",
"line_count": 266,
"max_line_length": 83,
"avg_line_length": 32.357142857142854,
"alnum_prop": 0.5500174276751482,
"repo_name": "batermj/algorithm-challenger",
"id": "9f4801f47e3aaa456c87a713d71e8021dd7932e4",
"size": "8607",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "code-analysis/programming_anguage/python/source_codes/Python3.8.0/Python-3.8.0/Lib/test/test_thread.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Assembly",
"bytes": "655185"
},
{
"name": "Batchfile",
"bytes": "127416"
},
{
"name": "C",
"bytes": "33127630"
},
{
"name": "C++",
"bytes": "1364796"
},
{
"name": "CSS",
"bytes": "3163"
},
{
"name": "Common Lisp",
"bytes": "48962"
},
{
"name": "DIGITAL Command Language",
"bytes": "26402"
},
{
"name": "DTrace",
"bytes": "2196"
},
{
"name": "Go",
"bytes": "26248"
},
{
"name": "HTML",
"bytes": "385719"
},
{
"name": "Haskell",
"bytes": "33612"
},
{
"name": "Java",
"bytes": "1084"
},
{
"name": "JavaScript",
"bytes": "20754"
},
{
"name": "M4",
"bytes": "403992"
},
{
"name": "Makefile",
"bytes": "238185"
},
{
"name": "Objective-C",
"bytes": "4934684"
},
{
"name": "PHP",
"bytes": "3513"
},
{
"name": "PLSQL",
"bytes": "45772"
},
{
"name": "Perl",
"bytes": "649"
},
{
"name": "PostScript",
"bytes": "27606"
},
{
"name": "PowerShell",
"bytes": "21737"
},
{
"name": "Python",
"bytes": "55270625"
},
{
"name": "R",
"bytes": "29951"
},
{
"name": "Rich Text Format",
"bytes": "14551"
},
{
"name": "Roff",
"bytes": "292490"
},
{
"name": "Ruby",
"bytes": "519"
},
{
"name": "Scala",
"bytes": "846446"
},
{
"name": "Shell",
"bytes": "491113"
},
{
"name": "Swift",
"bytes": "881"
},
{
"name": "TeX",
"bytes": "337654"
},
{
"name": "VBScript",
"bytes": "140"
},
{
"name": "XSLT",
"bytes": "153"
}
],
"symlink_target": ""
} |
"""
Copyright 2022 Google LLC
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
https://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
"""
Service config file
"""
import os
PORT = os.environ["PORT"] if os.environ.get("PORT") is not None else 80
PROJECT_ID = os.environ.get("PROJECT_ID", "")
if PROJECT_ID != "":
os.environ["GOOGLE_CLOUD_PROJECT"] = PROJECT_ID
DATABASE_PREFIX = os.getenv("DATABASE_PREFIX", "")
SCOPES = [
"https://www.googleapis.com/auth/cloud-platform",
"https://www.googleapis.com/auth/cloud-platform.read-only",
"https://www.googleapis.com/auth/devstorage.full_control",
"https://www.googleapis.com/auth/devstorage.read_only",
"https://www.googleapis.com/auth/devstorage.read_write"
]
COLLECTION = os.getenv("COLLECTION")
API_BASE_URL = os.getenv("API_BASE_URL")
SERVICE_NAME = os.getenv("SERVICE_NAME")
REDIS_HOST = os.getenv("REDIS_HOST")
| {
"content_hash": "1e011a29e18e26ed4c0eb122ec19733a",
"timestamp": "",
"source": "github",
"line_count": 42,
"max_line_length": 72,
"avg_line_length": 31.38095238095238,
"alnum_prop": 0.7298937784522003,
"repo_name": "GoogleCloudPlatform/document-intake-accelerator",
"id": "b08dad08e0b4b57679d5b5652684e87eaf20dfc0",
"size": "1318",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "microservices/hitl_service/src/config.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "20025"
},
{
"name": "Dockerfile",
"bytes": "4235"
},
{
"name": "HCL",
"bytes": "58065"
},
{
"name": "HTML",
"bytes": "3546"
},
{
"name": "JavaScript",
"bytes": "171699"
},
{
"name": "Python",
"bytes": "518924"
},
{
"name": "Shell",
"bytes": "20892"
}
],
"symlink_target": ""
} |
from dbaas_zabbix import database_providers
class ProviderFactory(object):
@classmethod
    def get_provider_class(cls, driver_name, is_ha):
for klass in available_providers():
name_eq_klass = driver_name == klass.__provider_name__
is_ha_eq_klass = is_ha == klass.__is_ha__
if name_eq_klass and is_ha_eq_klass:
return klass
raise NotImplementedError
@classmethod
def factory(cls, dbaas_api, **kwargs):
engine_name = dbaas_api.engine_name
is_ha = dbaas_api.is_ha
if kwargs.get('engine_name'):
engine_name = kwargs.get('engine_name')
del kwargs['engine_name']
if kwargs.get('is_ha'):
is_ha = kwargs.get('is_ha')
del kwargs['is_ha']
driver_class = cls.get_provider_class(engine_name, is_ha)
return driver_class(dbaas_api=dbaas_api, **kwargs)
def available_providers():
available_objects = dir(database_providers)
available_klasses = (
klass for klass in available_objects if 'Provider' in klass
)
    return (getattr(database_providers, klass) for klass in
            available_klasses)
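# Minimal usage sketch (the api object below is a stand-in -- the real
# dbaas_api comes from the caller, and the concrete provider classes may
# require additional keyword arguments):
#
#     class FakeApi(object):
#         engine_name = 'mysql'   # assumed provider name
#         is_ha = True
#
#     provider = ProviderFactory.factory(FakeApi())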
| {
"content_hash": "f659507bc4628c7f9ba025a2203af820",
"timestamp": "",
"source": "github",
"line_count": 37,
"max_line_length": 67,
"avg_line_length": 32.351351351351354,
"alnum_prop": 0.6090225563909775,
"repo_name": "globocom/dbaas-zabbix",
"id": "381f0cfa445492595a32c94a547c9adaf575d505",
"size": "1197",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "dbaas_zabbix/provider_factory.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Makefile",
"bytes": "2416"
},
{
"name": "Python",
"bytes": "58819"
},
{
"name": "Shell",
"bytes": "30"
}
],
"symlink_target": ""
} |
import os
import subprocess
class AbstractBrowser:
_binary = None
def __init__(self, url, user_data_dir):
self.user_data_dir = os.path.join(user_data_dir, self._binary)
self.url = url
if not os.path.exists(self.user_data_dir):
os.makedirs(self.user_data_dir)
@staticmethod
def _available(binary):
extensions = os.environ.get("PATHEXT", "").split(os.pathsep)
for directory in os.environ.get("PATH", "").split(os.pathsep):
base = os.path.join(directory, binary)
options = [base] + [(base + ext) for ext in extensions]
for filename in options:
if os.path.exists(filename):
return True
return False
def _start(self, args):
print("running: " + self._binary)
try:
subprocess.check_output([self._binary] + args)
except subprocess.CalledProcessError as e:
print(e.output)
return e.returncode
except Exception as e:
print(e)
return -1
return 0
def start(self):
return -1
@staticmethod
def available():
return False
class Chrome(AbstractBrowser):
_binary = "google-chrome"
@staticmethod
def available():
return AbstractBrowser._available(Chrome._binary)
def start(self):
args = ["--app=%s" % self.url]
args += ["--user-data-dir=%s" % self.user_data_dir]
return self._start(args)
class Chromium(Chrome):
_binary = "xchromium"
@staticmethod
def available():
return AbstractBrowser._available(Chromium._binary)
class Firefox(AbstractBrowser):
_binary = "firefox"
@staticmethod
def available():
return AbstractBrowser._available(Firefox._binary)
def start(self):
args = ["--profile", self.user_data_dir]
args += ["--no-remote"]
args += [self.url]
return self._start(args)
class Browser:
def __init__(self, url, user_data_dir=None):
self.client = None
for cls in [Chrome, Chromium, Firefox]:
if cls.available():
self.client = cls(url, user_data_dir)
break
if self.client is None:
raise Exception("No suitable client found!")
def start(self):
return self.client.start()
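# Example (illustrative values): pick the first available browser binary and
# open the given URL with a dedicated profile directory:
#
#     browser = Browser("http://localhost:8888",
#                       os.path.expanduser("~/.edubot"))
#     exit_code = browser.start()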
| {
"content_hash": "92815df3ee29a85b04eb102adb175290",
"timestamp": "",
"source": "github",
"line_count": 110,
"max_line_length": 70,
"avg_line_length": 21.763636363636362,
"alnum_prop": 0.5693400167084378,
"repo_name": "wendlers/edubot-snap",
"id": "79094b1325f9f2bc6884b041da0987bd432b2b90",
"size": "3520",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/edubot/client.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "1523"
},
{
"name": "Python",
"bytes": "867220"
},
{
"name": "Shell",
"bytes": "215"
}
],
"symlink_target": ""
} |
"""
Finds gaps between consecutive protein residues in the PDB.
Detects gaps either by a distance criterion or by discontinuous residue numbering.
Only applies to protein residues.
Usage:
python pdb_gap.py <pdb file>
Example:
python pdb_gap.py 1CTF.pdb
This program is part of the `pdb-tools` suite of utilities and should not be
distributed in isolation. The `pdb-tools` were created to quickly manipulate PDB
files using the terminal, and can be used sequentially, with one tool streaming
data to another. They are based on old FORTRAN77 code that was taking too much
effort to maintain and compile. RIP.
"""
import os
import sys
__author__ = "Joao Rodrigues"
__email__ = "[email protected]"
def check_input(args):
"""Checks whether to read from stdin/file and validates user input/options.
"""
# Defaults
fh = sys.stdin # file handle
if not len(args):
# Reading from pipe with default option
if sys.stdin.isatty():
sys.stderr.write(__doc__)
sys.exit(1)
elif len(args) == 1:
if not os.path.isfile(args[0]):
emsg = 'ERROR!! File not found or not readable: \'{}\'\n'
sys.stderr.write(emsg.format(args[0]))
sys.stderr.write(__doc__)
sys.exit(1)
fh = open(args[0], 'r')
else: # Whatever ...
emsg = 'ERROR!! Script takes 1 argument, not \'{}\'\n'
sys.stderr.write(emsg.format(len(args)))
sys.stderr.write(__doc__)
sys.exit(1)
return fh
def run(fhandle):
"""
Detect gaps between residues in the PDB file.
Parameters
----------
fhandle : a line-by-line iterator of the original PDB file.
Returns
-------
None
Writes to the sys.stdout.
"""
fmt_GAPd = "{0[1]}:{0[3]}{0[2]} < {2:7.2f}A > {1[1]}:{1[3]}{1[2]}\n"
fmt_GAPs = "{0[1]}:{0[3]}{0[2]} < Seq. Gap > {1[1]}:{1[3]}{1[2]}\n"
centroid = ' CA ' # respect spacing. 'CA ' != ' CA '
distance_threshold = 4.0 * 4.0
def calculate_sq_atom_distance(i, j):
"""Squared euclidean distance between two 3d points"""
return (i[0] - j[0]) * (i[0] - j[0]) + \
(i[1] - j[1]) * (i[1] - j[1]) + \
(i[2] - j[2]) * (i[2] - j[2])
prev_at = (None, None, None, None, (None, None, None))
model = 0
n_gaps = 0
for line in fhandle:
if line.startswith('MODEL'):
model = int(line[10:14])
elif line.startswith('ATOM'):
atom_name = line[12:16]
if atom_name != centroid:
continue
resn = line[17:20]
resi = int(line[22:26])
chain = line[21]
x = float(line[30:38])
y = float(line[38:46])
z = float(line[46:54])
at_uid = (model, chain, resi, resn, atom_name, (x, y, z))
if prev_at[0] == at_uid[0] and prev_at[1] == at_uid[1]:
d = calculate_sq_atom_distance(at_uid[5], prev_at[5])
if d > distance_threshold:
sys.stdout.write(fmt_GAPd.format(prev_at, at_uid, d**0.5))
n_gaps += 1
elif prev_at[2] + 1 != at_uid[2]:
sys.stdout.write(fmt_GAPs.format(prev_at, at_uid))
n_gaps += 1
prev_at = at_uid
sys.stdout.write('Found {} gap(s) in the structure\n'.format(n_gaps))
detect_gaps = run
def main():
# Check Input
pdbfh = check_input(sys.argv[1:])
# Do the job
run(pdbfh)
# last line of the script
# We can close it even if it is sys.stdin
pdbfh.close()
sys.exit(0)
if __name__ == '__main__':
main()
| {
"content_hash": "a4a983a06407f9970ae76d0df41d66cb",
"timestamp": "",
"source": "github",
"line_count": 136,
"max_line_length": 79,
"avg_line_length": 27.154411764705884,
"alnum_prop": 0.5410235580828595,
"repo_name": "haddocking/pdb-tools",
"id": "55db6332fa7373ede9ef86f6f08ab0466f415cbf",
"size": "4326",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "pdbtools/pdb_gap.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "578989"
},
{
"name": "TeX",
"bytes": "980"
}
],
"symlink_target": ""
} |
import requests
import base64
def strip_username(repo):
return repo['full_name'].split('/')[1]
def repo_pretty_info(repo):
return strip_username(repo).ljust(32) + \
"[last commit on {:s}]".format(repo['pushed_at'])
def list_repos(user, verbose=True):
"""This function will return a list of all your repos. If
verbose==True, it will print your repo list, separated into
forked and not-forked repos."""
url = "https://api.github.com/users/{user:s}/repos".format(user=user)
    # query-string arguments for a GET request belong in params, not data
    response = requests.get(url, params={"user": user, "type": "sources"})
assert response.status_code == requests.codes.ok
json_response = response.json()
forked = []
notforked = []
for repo in json_response:
if repo['fork']:
forked.append(repo)
continue
notforked.append(repo)
if verbose:
print ("="*40 + "\nFORKED:")
for e in forked:
print(repo_pretty_info(e))
print("="*40 + "\nNOT-FORKED:")
for r in notforked:
print(repo_pretty_info(r))
return forked + notforked
def show_readme(user, repo):
url = "https://api.github.com/repos/{user:s}/{repo:s}/readme".\
format(user=user, repo=repo)
response = requests.get(url, data={"user": user})
assert response.status_code == requests.codes.ok
json_response = response.json()
readme_content = base64.b64decode(json_response['content']).decode('utf-8')
return readme_content
if __name__ == "__main__":
repos = list_repos("pedromduarte")
print("repos included = ", len(repos))
print(show_readme("pedromduarte", "thesis-hubbard"))
| {
"content_hash": "8e0316631fa029cba0a84928a5d4669e",
"timestamp": "",
"source": "github",
"line_count": 58,
"max_line_length": 79,
"avg_line_length": 28.70689655172414,
"alnum_prop": 0.6156156156156156,
"repo_name": "PedroMDuarte/pmd-github-profile",
"id": "3c097618e5369bd017f685461e6de32b81256131",
"size": "1665",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "ghprofile/ghprofile.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "20297"
},
{
"name": "HTML",
"bytes": "4857"
},
{
"name": "Python",
"bytes": "6911"
}
],
"symlink_target": ""
} |
from zerver.lib.actions import do_create_user, do_mark_hotspot_as_read
from zerver.lib.hotspots import ALL_HOTSPOTS, INTRO_HOTSPOTS, get_next_hotspots
from zerver.lib.test_classes import ZulipTestCase
from zerver.models import UserHotspot, UserProfile, get_realm
# Splitting this out, since I imagine this will eventually have most of the
# complicated hotspots logic.
class TestGetNextHotspots(ZulipTestCase):
def setUp(self) -> None:
super().setUp()
self.user = do_create_user(
"[email protected]", "password", get_realm("zulip"), "user", acting_user=None
)
def test_first_hotspot(self) -> None:
hotspots = get_next_hotspots(self.user)
self.assert_length(hotspots, 1)
self.assertEqual(hotspots[0]["name"], "intro_reply")
def test_some_done_some_not(self) -> None:
do_mark_hotspot_as_read(self.user, "intro_reply")
do_mark_hotspot_as_read(self.user, "intro_compose")
hotspots = get_next_hotspots(self.user)
self.assert_length(hotspots, 1)
self.assertEqual(hotspots[0]["name"], "intro_streams")
def test_all_intro_hotspots_done(self) -> None:
with self.settings(TUTORIAL_ENABLED=True):
self.assertNotEqual(self.user.tutorial_status, UserProfile.TUTORIAL_FINISHED)
for hotspot in INTRO_HOTSPOTS:
do_mark_hotspot_as_read(self.user, hotspot)
self.assertEqual(self.user.tutorial_status, UserProfile.TUTORIAL_FINISHED)
self.assertEqual(get_next_hotspots(self.user), [])
def test_send_all(self) -> None:
with self.settings(DEVELOPMENT=True, ALWAYS_SEND_ALL_HOTSPOTS=True):
self.assert_length(ALL_HOTSPOTS, len(get_next_hotspots(self.user)))
def test_tutorial_disabled(self) -> None:
with self.settings(TUTORIAL_ENABLED=False):
self.assertEqual(get_next_hotspots(self.user), [])
class TestHotspots(ZulipTestCase):
def test_do_mark_hotspot_as_read(self) -> None:
user = self.example_user("hamlet")
do_mark_hotspot_as_read(user, "intro_compose")
self.assertEqual(
list(UserHotspot.objects.filter(user=user).values_list("hotspot", flat=True)),
["intro_compose"],
)
def test_hotspots_url_endpoint(self) -> None:
user = self.example_user("hamlet")
self.login_user(user)
result = self.client_post("/json/users/me/hotspots", {"hotspot": "intro_reply"})
self.assert_json_success(result)
self.assertEqual(
list(UserHotspot.objects.filter(user=user).values_list("hotspot", flat=True)),
["intro_reply"],
)
result = self.client_post("/json/users/me/hotspots", {"hotspot": "invalid"})
self.assert_json_error(result, "Unknown hotspot: invalid")
self.assertEqual(
list(UserHotspot.objects.filter(user=user).values_list("hotspot", flat=True)),
["intro_reply"],
)
| {
"content_hash": "0ea05384f113d193cde1eeb25df6c9a9",
"timestamp": "",
"source": "github",
"line_count": 69,
"max_line_length": 90,
"avg_line_length": 43.130434782608695,
"alnum_prop": 0.6502016129032258,
"repo_name": "punchagan/zulip",
"id": "0927a54da7b697c75dd598ceca0ed6d3beaabdf0",
"size": "2976",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "zerver/tests/test_hotspots.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "453615"
},
{
"name": "Dockerfile",
"bytes": "4898"
},
{
"name": "Emacs Lisp",
"bytes": "157"
},
{
"name": "HTML",
"bytes": "607321"
},
{
"name": "Handlebars",
"bytes": "315160"
},
{
"name": "JavaScript",
"bytes": "3572990"
},
{
"name": "Perl",
"bytes": "9884"
},
{
"name": "Puppet",
"bytes": "94991"
},
{
"name": "Python",
"bytes": "8750579"
},
{
"name": "Ruby",
"bytes": "3875"
},
{
"name": "Shell",
"bytes": "134468"
},
{
"name": "TypeScript",
"bytes": "223296"
}
],
"symlink_target": ""
} |
import wx
import ImageTransferNotebook as imageNotebook
import SettingsFrame as prefs
import WifiDialog as wifi
import packages.rmnetwork as network
from packages.lang.Localizer import *
import sys, os, shutil, platform
if platform.system() == "Linux":
from wx.lib.pubsub import setupkwargs
from wx.lib.pubsub import pub as Publisher
else:
from wx.lib.pubsub import pub as Publisher
from wx.lib.wordwrap import wordwrap
BASE_PATH = None
################################################################################
# MAIN FRAME OF APPLICATION ####################################################
################################################################################
class ImageTransferFrame(wx.Frame):
def __init__(self,parent,id,title,base_path):
wx.Frame.__init__(self,parent,id,title,size=(652,585),style=wx.MINIMIZE_BOX | wx.SYSTEM_MENU | wx.CAPTION | wx.CLOSE_BOX | wx.CLIP_CHILDREN)
self.parent = parent
self.base_path = base_path
global BASE_PATH
BASE_PATH = base_path
self.Bind(wx.EVT_CLOSE, self.Close)
self.SetupMenuBar()
self.notebook = imageNotebook.ImageTransferNotebook(self,-1,None)
# Create an accelerator table for shortcuts
sc_wifi_id = wx.NewId()
sc_settings_id = wx.NewId()
self.Bind(wx.EVT_MENU, self.ShowPlayerSettings, id=sc_settings_id)
self.Bind(wx.EVT_MENU, self.ShowWifiSettings, id=sc_wifi_id)
self.accel_tbl = wx.AcceleratorTable([(wx.ACCEL_CTRL, ord(','), sc_settings_id),
(wx.ACCEL_SHIFT, ord('W'), sc_wifi_id)
])
self.SetAcceleratorTable(self.accel_tbl)
self.Center()
self.Show()
self.notebook.SearchHosts()
def Close(self, event=None):
# remove temp dir if present
from os.path import expanduser
home = expanduser("~")
appPath = home + '/.raspmedia/'
tmpPath = appPath + 'tmp/'
if os.path.isdir(tmpPath):
shutil.rmtree(tmpPath)
Publisher.unsubAll()
self.notebook.Close()
network.udpresponselistener.destroy()
self.Destroy()
sys.exit(0)
def SetupMenuBar(self):
# menus
fileMenu = wx.Menu()
helpMenu = wx.Menu()
# File Menu
menuSettings = fileMenu.Append(wx.ID_ANY, "&"+tr("player_settings") + "\tCTRL+,", tr("player_settings"))
path = resource_path("img/tools.png")
menuSettings.SetBitmap(wx.Bitmap(path))
menuExit = fileMenu.Append(wx.ID_EXIT, "&"+tr("exit"),tr("exit"))
self.Bind(wx.EVT_MENU, self.Close, menuExit)
self.Bind(wx.EVT_MENU, self.ShowPlayerSettings, menuSettings)
# Help Menu
about = helpMenu.Append(wx.ID_ANY, "&"+tr("about"))
self.Bind(wx.EVT_MENU, self.ShowAbout, about)
# Menubar
menuBar = wx.MenuBar()
menuBar.Append(fileMenu, "&"+tr("file")) # Adding the "filemenu" to the MenuBar
menuBar.Append(helpMenu, "&"+tr("about"))
self.SetMenuBar(menuBar)
def ShowPlayerSettings(self, event):
actHost = self.notebook.CurrentlyActiveHost()
if not actHost == -1:
settings = prefs.SettingsFrame(self,-1,tr("player_settings"),actHost, self.notebook.CurrentConfig())
settings.Center()
settings.SetBackgroundColour('WHITE')
settings.Refresh()
settings.Show()
def ShowWifiSettings(self, event):
actHost = self.notebook.CurrentlyActiveHost()
if not actHost == -1:
wifiDlg = wifi.WifiDialog(self, -1, tr("wifi_settings"), actHost["addr"])
wifiDlg.ShowModal()
def SettingsClosedWithConfig(self, config):
self.notebook.UpdateCurrentPlayerConfig(config)
def ShowAbout(self, event):
        # TODO: read this message from a defined version info file in the future
msg = "RaspMedia Image Transfer v1.0\n(c) 2014 by www.multimedia-installationen.at\nContact: [email protected]\nAll rights reserved."
dlg = wx.MessageDialog(self, msg, "About", style=wx.OK)
dlg.ShowModal()
# HELPER METHOD to get correct resource path for image file
def resource_path(relative_path):
    """Get absolute path to resource, works for dev and for PyInstaller."""
    global BASE_PATH
try:
# PyInstaller creates a temp folder and stores path in _MEIPASS
base_path = sys._MEIPASS
#print "BASE PATH FOUND: "+ base_path
except Exception:
#print "BASE PATH NOT FOUND!"
base_path = BASE_PATH
#print "JOINING " + base_path + " WITH " + relative_path
resPath = os.path.normcase(os.path.join(base_path, relative_path))
#resPath = base_path + relative_path
#print resPath
return resPath
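# A minimal usage sketch (not part of the original module): resource_path() is
# what the frame uses above for bundled icons such as "img/tools.png". Inside a
# frozen PyInstaller build it resolves against sys._MEIPASS; otherwise it falls
# back to the BASE_PATH handed to ImageTransferFrame by the launcher. The
# helper name below is hypothetical.
def _example_icon_path():
    # e.g. <launcher dir>/img/tools.png in development, or a _MEIxxxxxx temp
    # directory when running the bundled executable
    return resource_path("img/tools.png")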
| {
"content_hash": "e2f8db16c681e9de627da26f579c6d0b",
"timestamp": "",
"source": "github",
"line_count": 124,
"max_line_length": 161,
"avg_line_length": 39.11290322580645,
"alnum_prop": 0.6028865979381444,
"repo_name": "peter9teufel/raspmedia",
"id": "fac0da37664101ea742b488f6a83a69cc4f5d318",
"size": "4850",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "Desktop/packages/rmgui/ImageTransferFrame.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "740"
},
{
"name": "CSS",
"bytes": "3503"
},
{
"name": "HTML",
"bytes": "2444"
},
{
"name": "PHP",
"bytes": "282"
},
{
"name": "Python",
"bytes": "291861"
},
{
"name": "Shell",
"bytes": "17406"
}
],
"symlink_target": ""
} |
import pytest
import pandas as pd
import pandas._testing as tm
from pandas.tests.arrays.masked_shared import (
ComparisonOps,
NumericOps,
)
class TestComparisonOps(NumericOps, ComparisonOps):
@pytest.mark.parametrize("other", [True, False, pd.NA, -1.0, 0.0, 1])
def test_scalar(self, other, comparison_op, dtype):
ComparisonOps.test_scalar(self, other, comparison_op, dtype)
def test_compare_with_integerarray(self, comparison_op):
op = comparison_op
a = pd.array([0, 1, None] * 3, dtype="Int64")
b = pd.array([0] * 3 + [1] * 3 + [None] * 3, dtype="Float64")
other = b.astype("Int64")
expected = op(a, other)
result = op(a, b)
tm.assert_extension_array_equal(result, expected)
expected = op(other, a)
result = op(b, a)
tm.assert_extension_array_equal(result, expected)
def test_equals():
# GH-30652
# equals is generally tested in /tests/extension/base/methods, but this
# specifically tests that two arrays of the same class but different dtype
# do not evaluate equal
a1 = pd.array([1, 2, None], dtype="Float64")
a2 = pd.array([1, 2, None], dtype="Float32")
assert a1.equals(a2) is False
| {
"content_hash": "bd4e67222d3e9a6eacca2a403f185aee",
"timestamp": "",
"source": "github",
"line_count": 36,
"max_line_length": 78,
"avg_line_length": 34.22222222222222,
"alnum_prop": 0.6396103896103896,
"repo_name": "jorisvandenbossche/pandas",
"id": "c4163c25ae74d36a11be97cc59a159c8498b1a15",
"size": "1232",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "pandas/tests/arrays/floating/test_comparison.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "127"
},
{
"name": "C",
"bytes": "360342"
},
{
"name": "CSS",
"bytes": "1438"
},
{
"name": "Cython",
"bytes": "1083849"
},
{
"name": "Dockerfile",
"bytes": "1690"
},
{
"name": "HTML",
"bytes": "456275"
},
{
"name": "Makefile",
"bytes": "507"
},
{
"name": "Python",
"bytes": "17541583"
},
{
"name": "Shell",
"bytes": "10719"
},
{
"name": "Smarty",
"bytes": "7820"
},
{
"name": "XSLT",
"bytes": "1196"
}
],
"symlink_target": ""
} |
import unittest
import json
import requests
from mldb_py_runner.mldb_py_runner import MldbRunner
class MldbMergedDatasetTest(unittest.TestCase):
def setUp(self):
self.mldb = MldbRunner()
self.port = self.mldb.port
self.base_url = 'http://localhost:' + str(self.port) + '/v1'
def tearDown(self):
del self.mldb
self.mldb = None
def test_merge_non_existing_dataset(self):
"""
        Creating a merged dataset out of non-existing datasets should not
        work. The connector sends/receives everything as text.
"""
config = {
'type' : "merged",
'id' : "merged_dataset",
'params' : {
"datasets": [
{"id": "whatever_1"},
{"id": "whatever_2"}
]
}
}
r = requests.post(url=self.base_url + "/datasets",
data=config)
self.assertEqual(r.status_code, 400)
def test_merge_nothing(self):
"""
        Creating a merged dataset out of nothing should not work.
        The connector sends/receives everything as text.
"""
config = {
'type' : "merged",
'id' : "merged_dataset",
'params' : {
"datasets": [
]
}
}
r = requests.post(url=self.base_url + "/datasets",
data=config)
self.assertEqual(r.status_code, 400)
def test_sync(self):
"""
        Sync should work even when the connector sends JSON.
"""
config = {
'type' : "merged",
'id' : "merged_dataset",
'params' : {
"datasets": [
]
}
}
r = requests.post(url=self.base_url + '/datasets',
data=json.dumps(config),
headers={'content-type' : 'application/json'})
self.assertEqual(r.status_code, 400)
if __name__ == '__main__':
unittest.main(verbosity=2, buffer=True)
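# Hedged illustration (not part of the original tests) of the "sends/receives
# everything as text" remark above: posting a nested dict via `data=` gets
# form-encoded, while json.dumps plus an explicit content-type preserves the
# structure. The URL is a placeholder and the helper name is hypothetical.
def _example_request_bodies():
    config = {'type': 'merged', 'params': {'datasets': []}}
    as_form = requests.Request('POST', 'http://localhost:1/v1/datasets',
                               data=config).prepare()
    as_json = requests.Request('POST', 'http://localhost:1/v1/datasets',
                               data=json.dumps(config),
                               headers={'content-type': 'application/json'}).prepare()
    # as_form.body is urlencoded text, as_json.body is the JSON document itself
    return as_form.headers['Content-Type'], as_json.headers['Content-Type']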
| {
"content_hash": "0c01b5e763b1402afff4270a595bc2b0",
"timestamp": "",
"source": "github",
"line_count": 73,
"max_line_length": 72,
"avg_line_length": 28.602739726027398,
"alnum_prop": 0.4803639846743295,
"repo_name": "mldbai/mldb",
"id": "dc51e4b27d4d6f6cf85696dbbb45d0a74ccea8f0",
"size": "2277",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "testing/mldb_merged_dataset_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Awk",
"bytes": "643"
},
{
"name": "C",
"bytes": "11754639"
},
{
"name": "C++",
"bytes": "14072572"
},
{
"name": "CMake",
"bytes": "2737"
},
{
"name": "CSS",
"bytes": "17037"
},
{
"name": "Dockerfile",
"bytes": "1591"
},
{
"name": "Fortran",
"bytes": "16349"
},
{
"name": "HTML",
"bytes": "311171"
},
{
"name": "JavaScript",
"bytes": "2209253"
},
{
"name": "Jupyter Notebook",
"bytes": "7661154"
},
{
"name": "Makefile",
"bytes": "290745"
},
{
"name": "Perl",
"bytes": "3890"
},
{
"name": "Python",
"bytes": "1422764"
},
{
"name": "Shell",
"bytes": "32489"
},
{
"name": "Smarty",
"bytes": "2938"
},
{
"name": "SourcePawn",
"bytes": "52752"
}
],
"symlink_target": ""
} |
from http import client as http_client
from io import BytesIO
import json
import mimetypes
import os
import webob
import zipfile
from zipfile import ZipFile
from glance_store import exceptions as store_exceptions
from oslo_config import cfg
from oslo_log import log as logging
from oslo_utils import encodeutils
from oslo_utils import excutils
from oslo_utils import uuidutils
from tacker._i18n import _
from tacker.api.schemas import vnf_packages
from tacker.api import validation
from tacker.api.views import vnf_packages as vnf_packages_view
from tacker.common import csar_utils
from tacker.common import exceptions
from tacker.common import utils
from tacker.conductor.conductorrpc import vnf_pkgm_rpc
from tacker.glance_store import store as glance_store
from tacker.objects import fields
from tacker.objects import vnf_package as vnf_package_obj
from tacker.policies import vnf_package as vnf_package_policies
from tacker import wsgi
LOG = logging.getLogger(__name__)
CONF = cfg.CONF
class VnfPkgmController(wsgi.Controller):
_view_builder_class = vnf_packages_view.ViewBuilder
def __init__(self):
super(VnfPkgmController, self).__init__()
self.rpc_api = vnf_pkgm_rpc.VNFPackageRPCAPI()
glance_store.initialize_glance_store()
def _get_vnf_package(self, id, request):
# check if id is of type uuid format
if not uuidutils.is_uuid_like(id):
msg = _("Can not find requested vnf package: %s") % id
raise webob.exc.HTTPNotFound(explanation=msg)
try:
vnf_package = vnf_package_obj.VnfPackage.get_by_id(
request.context, id)
except exceptions.VnfPackageNotFound:
msg = _("Can not find requested vnf package: %s") % id
raise webob.exc.HTTPNotFound(explanation=msg)
return vnf_package
@wsgi.response(http_client.CREATED)
@wsgi.expected_errors((http_client.BAD_REQUEST, http_client.FORBIDDEN))
@validation.schema(vnf_packages.create)
def create(self, request, body):
context = request.environ['tacker.context']
context.can(vnf_package_policies.VNFPKGM % 'create')
vnf_package = vnf_package_obj.VnfPackage(context=request.context)
vnf_package.onboarding_state = (
fields.PackageOnboardingStateType.CREATED)
vnf_package.operational_state = (
fields.PackageOperationalStateType.DISABLED)
vnf_package.usage_state = fields.PackageUsageStateType.NOT_IN_USE
vnf_package.user_data = body.get('userDefinedData', dict())
vnf_package.tenant_id = request.context.project_id
vnf_package.create()
headers = {"location": '/vnfpkgm/v1/vnf_packages/%s' % vnf_package.id}
result = self._view_builder.create(vnf_package)
return wsgi.ResponseObject(result, headers=headers)
@wsgi.response(http_client.OK)
@wsgi.expected_errors((http_client.FORBIDDEN, http_client.NOT_FOUND))
def show(self, request, id):
context = request.environ['tacker.context']
context.can(vnf_package_policies.VNFPKGM % 'show')
# check if id is of type uuid format
if not uuidutils.is_uuid_like(id):
msg = _("Can not find requested vnf package: %s") % id
raise webob.exc.HTTPNotFound(explanation=msg)
try:
vnf_package = vnf_package_obj.VnfPackage.get_by_id(
request.context, id, expected_attrs=[
"vnf_deployment_flavours", "vnfd", "vnf_artifacts"])
except exceptions.VnfPackageNotFound:
msg = _("Can not find requested vnf package: %s") % id
raise webob.exc.HTTPNotFound(explanation=msg)
return self._view_builder.show(vnf_package)
@wsgi.response(http_client.OK)
@wsgi.expected_errors((http_client.BAD_REQUEST, http_client.FORBIDDEN))
@validation.query_schema(vnf_packages.query_params_v1)
def index(self, request):
context = request.environ['tacker.context']
context.can(vnf_package_policies.VNFPKGM % 'index')
search_opts = {}
search_opts.update(request.GET)
def _key_exists(key, validate_value=True):
try:
request.GET[key]
except KeyError:
return False
return True
all_fields = _key_exists('all_fields')
exclude_default = _key_exists('exclude_default')
fields = request.GET.get('fields')
exclude_fields = request.GET.get('exclude_fields')
filters = request.GET.get('filter')
if not (all_fields or fields or exclude_fields):
exclude_default = True
self._view_builder.validate_attribute_fields(all_fields=all_fields,
fields=fields, exclude_fields=exclude_fields,
exclude_default=exclude_default)
filters = self._view_builder.validate_filter(filters)
vnf_packages = vnf_package_obj.VnfPackagesList.get_by_filters(
request.context, read_deleted='no', filters=filters)
return self._view_builder.index(vnf_packages,
all_fields=all_fields,
exclude_fields=exclude_fields,
fields=fields,
exclude_default=exclude_default)
@wsgi.response(http_client.NO_CONTENT)
@wsgi.expected_errors((http_client.FORBIDDEN, http_client.NOT_FOUND,
http_client.CONFLICT))
def delete(self, request, id):
context = request.environ['tacker.context']
context.can(vnf_package_policies.VNFPKGM % 'delete')
vnf_package = self._get_vnf_package(id, request)
if (vnf_package.operational_state ==
fields.PackageOperationalStateType.ENABLED or
vnf_package.usage_state ==
fields.PackageUsageStateType.IN_USE):
            msg = _("VNF Package %(id)s cannot be deleted as its "
"operational state is %(operational_state)s and usage "
"state is %(usage_state)s.")
raise webob.exc.HTTPConflict(
explanation=msg % {
"id": id,
"operational_state": vnf_package.operational_state,
"usage_state": vnf_package.usage_state})
# Delete vnf_package
self.rpc_api.delete_vnf_package(context, vnf_package)
@wsgi.response(http_client.ACCEPTED)
@wsgi.expected_errors((http_client.FORBIDDEN, http_client.NOT_FOUND,
http_client.CONFLICT,
http_client.REQUESTED_RANGE_NOT_SATISFIABLE))
def fetch_vnf_package_content(self, request, id):
context = request.environ['tacker.context']
context.can(vnf_package_policies.VNFPKGM % 'fetch_package_content')
vnf_package = self._get_vnf_package(id, request)
if vnf_package.onboarding_state != \
fields.PackageOnboardingStateType.ONBOARDED:
msg = _("VNF Package %(id)s onboarding state "
"is not %(onboarding)s")
raise webob.exc.HTTPConflict(explanation=msg % {"id": id,
"onboarding": fields.PackageOnboardingStateType.ONBOARDED})
if vnf_package.size == 0:
try:
zip_file_size = glance_store.get_csar_size(id,
vnf_package.location_glance_store)
vnf_package.size = zip_file_size
vnf_package.save()
except exceptions.VnfPackageLocationInvalid:
msg = _("Vnf package not present at location")
raise webob.exc.HTTPNotFound(explanation=msg)
else:
zip_file_size = vnf_package.size
range_val = self._get_range_from_request(request, zip_file_size)
return self._download(
request.response, range_val, id, vnf_package.location_glance_store,
zip_file_size)
def _download(self, response, range_val, uuid, location, zip_file_size):
offset, chunk_size = 0, None
if range_val:
if isinstance(range_val, webob.byterange.Range):
response_end = zip_file_size - 1
# NOTE(sameert): webob parsing is zero-indexed.
                # i.e., to download the first 5 bytes of a 10-byte image,
# request should be "bytes=0-4" and the response would be
# "bytes 0-4/10".
# Range if validated, will never have 'start' object as None.
if range_val.start >= 0:
offset = range_val.start
else:
# NOTE(sameert): Negative start values needs to be
# processed to allow suffix-length for Range request
# like "bytes=-2" as per rfc7233.
if abs(range_val.start) < zip_file_size:
offset = zip_file_size + range_val.start
if range_val.end is not None and range_val.end < zip_file_size:
chunk_size = range_val.end - offset
response_end = range_val.end - 1
else:
chunk_size = zip_file_size - offset
response.status_int = 206
response.headers['Content-Type'] = 'application/zip'
response.app_iter = self._get_csar_zip_data(uuid,
location, offset, chunk_size)
# NOTE(sameert): In case of a full zip download, when
# chunk_size was none, reset it to zip.size to set the
# response header's Content-Length.
if chunk_size is not None:
response.headers['Content-Range'] = 'bytes %s-%s/%s'\
% (offset,
response_end,
zip_file_size)
else:
chunk_size = zip_file_size
response.headers['Content-Length'] = str(chunk_size)
return response
def _get_csar_zip_data(self, uuid, location, offset=0, chunk_size=None):
try:
resp, size = glance_store.load_csar_iter(
uuid, location, offset=offset, chunk_size=chunk_size)
except exceptions.VnfPackageLocationInvalid:
msg = _("Vnf package not present at location")
raise webob.exc.HTTPServerError(explanation=msg)
return resp
def _get_range_from_request(self, request, zip_file_size):
range_str = request._headers.environ.get('HTTP_RANGE')
if range_str is not None:
# NOTE(sameert): We do not support multi range requests.
if ',' in range_str:
msg = _("Requests with multiple ranges are not supported in "
"Tacker. You may make multiple single-range requests "
"instead.")
raise webob.exc.HTTPBadRequest(explanation=msg)
range_ = webob.byterange.Range.parse(range_str)
if range_ is None:
range_err_msg = _("The byte range passed in the 'Range' header"
" did not match any available byte range in the VNF package"
" file")
raise webob.exc.HTTPRequestRangeNotSatisfiable(
explanation=range_err_msg)
            # NOTE(sameert): Ensure that a range like bytes=4- for a zip
# size of 3 is invalidated as per rfc7233.
if range_.start >= zip_file_size:
msg = _("Invalid start position in Range header. "
"Start position MUST be in the inclusive range"
"[0, %s].") % (zip_file_size - 1)
raise webob.exc.HTTPRequestRangeNotSatisfiable(
explanation=msg)
return range_
@wsgi.response(http_client.ACCEPTED)
@wsgi.expected_errors((http_client.FORBIDDEN, http_client.NOT_FOUND,
http_client.CONFLICT))
def upload_vnf_package_content(self, request, id, body):
context = request.environ['tacker.context']
context.can(vnf_package_policies.VNFPKGM % 'upload_package_content')
# check if id is of type uuid format
if not uuidutils.is_uuid_like(id):
msg = _("Can not find requested vnf package: %s") % id
return self._make_problem_detail('Not Found', msg, 404)
try:
vnf_package = vnf_package_obj.VnfPackage.get_by_id(
request.context, id)
except exceptions.VnfPackageNotFound:
msg = _("Can not find requested vnf package: %s") % id
return self._make_problem_detail('Not Found', msg, 404)
except Exception as e:
return self._make_problem_detail(
'Internal Server Error', str(e), 500)
if vnf_package.onboarding_state != \
fields.PackageOnboardingStateType.CREATED:
msg = _("VNF Package %(id)s onboarding state "
"is not %(onboarding)s")
return self._make_problem_detail('Conflict', msg % {"id": id,
"onboarding": fields.PackageOnboardingStateType.CREATED},
409)
vnf_package.onboarding_state = (
fields.PackageOnboardingStateType.UPLOADING)
try:
vnf_package.save()
except Exception as e:
return self._make_problem_detail(
'Internal Server Error', str(e), 500)
try:
(location, size, checksum, multihash,
loc_meta) = glance_store.store_csar(context, id, body)
except exceptions.UploadFailedToGlanceStore:
with excutils.save_and_reraise_exception():
vnf_package.onboarding_state = (
fields.PackageOnboardingStateType.CREATED)
try:
vnf_package.save()
except Exception as e:
return self._make_problem_detail(
'Internal Server Error', str(e), 500)
vnf_package.algorithm = CONF.vnf_package.hashing_algorithm
vnf_package.hash = multihash
vnf_package.location_glance_store = location
vnf_package.size = size
try:
vnf_package.save()
except Exception as e:
vnf_package.onboarding_state = (
fields.PackageOnboardingStateType.CREATED)
try:
vnf_package.save()
except Exception as e:
return self._make_problem_detail(
'Internal Server Error', str(e), 500)
return self._make_problem_detail(
'Internal Server Error', str(e), 500)
# process vnf_package
self.rpc_api.upload_vnf_package_content(context, vnf_package)
@wsgi.response(http_client.ACCEPTED)
@wsgi.expected_errors((http_client.BAD_REQUEST, http_client.FORBIDDEN,
http_client.NOT_FOUND, http_client.CONFLICT))
@validation.schema(vnf_packages.upload_from_uri)
def upload_vnf_package_from_uri(self, request, id, body):
context = request.environ['tacker.context']
context.can(vnf_package_policies.VNFPKGM % 'upload_from_uri')
url = body['addressInformation']
if not utils.is_valid_url(url):
msg = _("Vnf package url '%s' is invalid") % url
raise webob.exc.HTTPBadRequest(explanation=msg)
vnf_package = self._get_vnf_package(id, request)
if vnf_package.onboarding_state != \
fields.PackageOnboardingStateType.CREATED:
msg = _("VNF Package %(id)s onboarding state is not "
"%(onboarding)s")
raise webob.exc.HTTPConflict(explanation=msg % {"id": id,
"onboarding": fields.PackageOnboardingStateType.CREATED})
vnf_package.onboarding_state = (
fields.PackageOnboardingStateType.UPLOADING)
vnf_package.save()
# process vnf_package
self.rpc_api.upload_vnf_package_from_uri(context, vnf_package,
body['addressInformation'],
user_name=body.get('userName'),
password=body.get('password'))
@wsgi.response(http_client.OK)
@wsgi.expected_errors((http_client.BAD_REQUEST, http_client.FORBIDDEN,
http_client.NOT_FOUND, http_client.CONFLICT))
@validation.schema(vnf_packages.patch)
def patch(self, request, id, body):
context = request.environ['tacker.context']
context.can(vnf_package_policies.VNFPKGM % 'patch')
old_vnf_package = self._get_vnf_package(id, request)
vnf_package = old_vnf_package.obj_clone()
user_data = body.get('userDefinedData')
operational_state = body.get('operationalState')
if operational_state:
if vnf_package.onboarding_state == \
fields.PackageOnboardingStateType.ONBOARDED:
if vnf_package.operational_state == operational_state:
msg = _("VNF Package %(id)s is already in "
"%(operationState)s operational state") % {
"id": id,
"operationState": vnf_package.operational_state}
raise webob.exc.HTTPConflict(explanation=msg)
else:
# update vnf_package operational state,
# if vnf_package Onboarding State is ONBOARDED
vnf_package.operational_state = operational_state
else:
if not user_data:
msg = _("Updating operational state is not allowed for VNF"
" Package %(id)s when onboarding state is not "
"%(onboarded)s")
raise webob.exc.HTTPBadRequest(
explanation=msg % {"id": id, "onboarded": fields.
PackageOnboardingStateType.ONBOARDED})
# update user data
if user_data:
for key, value in list(user_data.items()):
if vnf_package.user_data.get(key) == value:
del user_data[key]
if not user_data:
                msg = _("The userDefinedData provided in update request is the"
                        " same as the existing userDefinedData of vnf package"
                        " %(id)s. Nothing to update.")
raise webob.exc.HTTPConflict(
explanation=msg % {"id": id})
vnf_package.user_data = user_data
vnf_package.save()
return self._view_builder.patch(old_vnf_package, vnf_package)
@wsgi.response(http_client.OK)
@wsgi.expected_errors((http_client.BAD_REQUEST, http_client.FORBIDDEN,
http_client.NOT_FOUND, http_client.NOT_ACCEPTABLE,
http_client.CONFLICT,
http_client.INTERNAL_SERVER_ERROR))
def get_vnf_package_vnfd(self, request, id):
context = request.environ['tacker.context']
context.can(vnf_package_policies.VNFPKGM % 'get_vnf_package_vnfd')
valid_accept_headers = ['application/zip', 'text/plain']
accept_headers = request.headers['Accept'].split(',')
for header in accept_headers:
if header not in valid_accept_headers:
msg = _("Accept header %(accept)s is invalid, it should be one"
" of these values: %(valid_values)s")
raise webob.exc.HTTPNotAcceptable(
explanation=msg % {"accept": header,
"valid_values": ",".join(
valid_accept_headers)})
vnf_package = self._get_vnf_package(id, request)
if vnf_package.onboarding_state != \
fields.PackageOnboardingStateType.ONBOARDED:
msg = _("VNF Package %(id)s state is not "
"%(onboarded)s")
raise webob.exc.HTTPConflict(explanation=msg % {"id": id,
"onboarded": fields.PackageOnboardingStateType.ONBOARDED})
try:
vnfd_files_and_data = self.rpc_api.\
get_vnf_package_vnfd(context, vnf_package)
except exceptions.FailedToGetVnfdData as e:
LOG.error(e.msg)
raise webob.exc.HTTPInternalServerError(explanation=str(e.msg))
if 'text/plain' in accept_headers:
# Checking for yaml files only. This is required when there is
# TOSCA.meta file along with single yaml file.
# In such case we need to return single yaml file.
file_list = list(vnfd_files_and_data.keys())
yaml_files = [file for file in file_list if file.endswith(
('.yaml', '.yml'))]
if len(yaml_files) == 1:
request.response.headers['Content-Type'] = 'text/plain'
return vnfd_files_and_data[yaml_files[0]]
elif 'application/zip' in accept_headers:
request.response.headers['Content-Type'] = 'application/zip'
return self._create_vnfd_zip(vnfd_files_and_data)
else:
msg = _("VNFD is implemented as multiple yaml files,"
" Accept header should be 'application/zip'.")
raise webob.exc.HTTPBadRequest(explanation=msg)
else:
request.response.headers['Content-Type'] = 'application/zip'
return self._create_vnfd_zip(vnfd_files_and_data)
@wsgi.response(http_client.OK)
@wsgi.expected_errors((http_client.BAD_REQUEST, http_client.FORBIDDEN,
http_client.NOT_FOUND, http_client.CONFLICT,
http_client.REQUESTED_RANGE_NOT_SATISFIABLE))
def fetch_vnf_package_artifacts(self, request, id, artifact_path):
context = request.environ['tacker.context']
# get policy
context.can(vnf_package_policies.VNFPKGM % 'fetch_artifact')
# get vnf_package
if not uuidutils.is_uuid_like(id):
msg = _("Can not find requested vnf package: %s") % id
raise webob.exc.HTTPNotFound(explanation=msg)
try:
vnf_package = vnf_package_obj.VnfPackage.get_by_id(
request.context, id,
expected_attrs=["vnf_artifacts"])
except exceptions.VnfPackageNotFound:
msg = _("Can not find requested vnf package: %s") % id
raise webob.exc.HTTPNotFound(explanation=msg)
if vnf_package.onboarding_state != \
fields.PackageOnboardingStateType.ONBOARDED:
msg = _("VNF Package %(id)s state is not "
"%(onboarded)s")
raise webob.exc.HTTPConflict(explanation=msg % {"id": id,
"onboarded": fields.PackageOnboardingStateType.ONBOARDED})
offset, chunk_size = 0, None
        # get all artifacts' paths
artifact_file_paths = []
for item in vnf_package.vnf_artifacts:
artifact_file_paths.append(item.artifact_path)
if artifact_path in artifact_file_paths:
# get file's size
csar_path = self._get_csar_path(vnf_package)
absolute_artifact_path = os.path.join(csar_path, artifact_path)
if not os.path.isfile(absolute_artifact_path):
                msg = _(
                    "This type of path (url) '%s' is currently not supported") \
                    % artifact_path
raise webob.exc.HTTPBadRequest(explanation=msg)
artifact_size = os.path.getsize(absolute_artifact_path)
range_val = self._get_range_from_request(request, artifact_size)
# range_val exists
if range_val:
if isinstance(range_val, webob.byterange.Range):
# get the position of the last byte in the artifact file
response_end = artifact_size - 1
if range_val.start >= 0:
offset = range_val.start
else:
if abs(range_val.start) < artifact_size:
offset = artifact_size + range_val.start
if range_val.end is not None and \
range_val.end < artifact_size:
chunk_size = range_val.end - offset
response_end = range_val.end - 1
else:
chunk_size = artifact_size - offset
request.response.status_int = 206
# range_val does not exist, download the whole content of file
else:
offset = 0
chunk_size = artifact_size
            # get file's mimeType
mime_type = mimetypes.guess_type(artifact_path.split('/')[-1])[0]
if mime_type:
request.response.headers['Content-Type'] = mime_type
else:
request.response.headers['Content-Type'] = \
'application/octet-stream'
try:
artifact_data = self._download_vnf_artifact(
absolute_artifact_path, offset, chunk_size)
except exceptions.FailedToGetVnfArtifact as e:
LOG.error(e.msg)
raise webob.exc.HTTPInternalServerError(
explanation=e.msg)
request.response.text = artifact_data.decode('utf-8')
if request.response.status_int == 206:
request.response.headers['Content-Range'] = 'bytes %s-%s/%s' \
% (offset,
response_end,
artifact_size)
else:
chunk_size = artifact_size
request.response.headers['Content-Length'] = chunk_size
return request.response
else:
            msg = _("Artifact file not found.")
raise webob.exc.HTTPNotFound(explanation=msg)
def _get_csar_path(self, vnf_package):
csar_path = os.path.join(CONF.vnf_package.vnf_package_csar_path,
vnf_package.id)
if not os.path.isdir(csar_path):
location = vnf_package.location_glance_store
try:
zip_path = glance_store.load_csar(vnf_package.id, location)
csar_utils.extract_csar_zip_file(zip_path, csar_path)
except (store_exceptions.GlanceStoreException) as e:
exc_msg = encodeutils.exception_to_unicode(e)
msg = (_("Exception raised from glance store can be "
"unrecoverable if it is not related to connection"
" error. Error: %s.") % exc_msg)
raise exceptions.FailedToGetVnfArtifact(error=msg)
return csar_path
def _download_vnf_artifact(self, artifact_file_path, offset=0,
chunk_size=None):
try:
with open(artifact_file_path, 'rb') as f:
f.seek(offset, 1)
vnf_artifact_data = f.read(chunk_size)
return vnf_artifact_data
except Exception as e:
exc_msg = encodeutils.exception_to_unicode(e)
msg = (_("Exception raised while reading artifact file"
" Error: %s.") % exc_msg)
raise exceptions.FailedToGetVnfArtifact(error=msg)
def _create_vnfd_zip(self, vnfd_files_and_data):
buff = BytesIO()
with ZipFile(buff, 'w', zipfile.ZIP_DEFLATED) as zip_archive:
for file_path, file_data in vnfd_files_and_data.items():
zip_archive.writestr(file_path, file_data)
return buff.getvalue()
def _make_problem_detail(self, title, detail, status):
res = webob.Response(content_type='application/problem+json')
problemDetails = {}
problemDetails['title'] = title
problemDetails['detail'] = detail
problemDetails['status'] = status
res.text = json.dumps(problemDetails)
res.status_int = status
return res
def create_resource():
body_deserializers = {
'application/zip': wsgi.ZipDeserializer()
}
deserializer = wsgi.RequestDeserializer(
body_deserializers=body_deserializers)
return wsgi.Resource(VnfPkgmController(), deserializer=deserializer)
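# Illustrative sketch (not part of the Tacker source): the byte-range arithmetic
# used by _download() and fetch_vnf_package_artifacts() above, shown for a
# hypothetical 10-byte package. webob's parsing is zero-indexed with an
# exclusive end, so "bytes=0-4" selects the first five bytes and "bytes=-2" is
# an rfc7233 suffix range (the last two bytes).
def _example_range_math(range_header, total_size=10):
    range_ = webob.byterange.Range.parse(range_header)
    offset = range_.start if range_.start >= 0 else total_size + range_.start
    if range_.end is not None and range_.end < total_size:
        end = range_.end
    else:
        end = total_size
    chunk_size = end - offset
    # ("bytes=0-4", 10) -> (0, 5); ("bytes=-2", 10) -> (8, 2)
    return offset, chunk_size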
| {
"content_hash": "603e21479fab7f3f472e8bb145b4ec99",
"timestamp": "",
"source": "github",
"line_count": 658,
"max_line_length": 79,
"avg_line_length": 43.62917933130699,
"alnum_prop": 0.5717570015326738,
"repo_name": "stackforge/tacker",
"id": "eaf95e90bcc04a52bf7ff9d093b0c51dab0ad0a4",
"size": "29336",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tacker/api/vnfpkgm/v1/controller.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Mako",
"bytes": "1142"
},
{
"name": "Python",
"bytes": "1143026"
},
{
"name": "Shell",
"bytes": "26584"
}
],
"symlink_target": ""
} |
import webbrowser
import os
import base64
import launcher_log
import pygtk
pygtk.require('2.0')
import gtk
gtk.gdk.threads_init()
try:
import pynotify
pynotify.init('OSS-FTP Notify')
except:
    launcher_log.warn("import pynotify failed, please install python-notify if possible.")
pynotify = None
import module_init
class Gtk_tray():
notify_list = []
def __init__(self):
logo_filename = os.path.join(os.path.abspath(os.path.dirname(__file__)), 'web_ui', 'favicon.ico')
self.trayicon = gtk.StatusIcon()
self.trayicon.set_from_file(logo_filename)
self.trayicon.connect('popup-menu', lambda i, b, t: self.make_menu().popup(None, None, gtk.status_icon_position_menu, b, t, self.trayicon))
self.trayicon.connect('activate', self.show_control_web)
self.trayicon.set_tooltip('OSS-FTP')
self.trayicon.set_visible(True)
def make_menu(self):
menu = gtk.Menu()
itemlist = [(u'Config', self.on_show),
('Restart OSS-FTP', self.on_restart_ossftp),
(u'Quit', self.on_quit)]
for text, callback in itemlist:
item = gtk.MenuItem(text)
item.connect('activate', callback)
item.show()
menu.append(item)
menu.show()
return menu
def on_show(self, widget=None, data=None):
self.show_control_web()
def notify_general(self, msg="msg", title="Title", buttons={}, timeout=3600):
if not pynotify:
return False
        n = pynotify.Notification(title, msg)
for k in buttons:
data = buttons[k]["data"]
label = buttons[k]["label"]
callback = buttons[k]["callback"]
n.add_action(data, label, callback)
n.set_timeout(timeout)
n.show()
self.notify_list.append(n)
return True
def show_control_web(self, widget=None, data=None):
webbrowser.open_new("http://127.0.0.1:8192/")
def on_restart_ossftp(self, widget=None, data=None):
module_init.stop_all()
module_init.start_all_auto()
def on_quit(self, widget, data=None):
gtk.main_quit()
def serve_forever(self):
gtk.gdk.threads_enter()
gtk.main()
gtk.gdk.threads_leave()
sys_tray = Gtk_tray()
def main():
sys_tray.serve_forever()
if __name__ == '__main__':
main()
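# A hedged usage sketch (not part of the original module): the `buttons` mapping
# notify_general() expects is keyed arbitrarily, with each entry carrying the
# pynotify action id ("data"), a label and a callback. Names below are
# hypothetical.
def _example_restart_notification(tray):
    def on_restart(notification, action):
        tray.on_restart_ossftp()
    buttons = {
        "restart": {
            "data": "restart-ossftp",
            "label": "Restart OSS-FTP",
            "callback": on_restart,
        }
    }
    return tray.notify_general(msg="OSS-FTP stopped unexpectedly",
                               title="OSS-FTP", buttons=buttons, timeout=10)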
| {
"content_hash": "784233a10005380dd97c2ee8b33eede1",
"timestamp": "",
"source": "github",
"line_count": 89,
"max_line_length": 147,
"avg_line_length": 27.0561797752809,
"alnum_prop": 0.592607973421927,
"repo_name": "johnkeepmoving/oss-ftp",
"id": "ecbd0a29b6b912257742f473c45b858b0b55f5ab",
"size": "2503",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "launcher/gtk_tray.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "247"
},
{
"name": "C",
"bytes": "463131"
},
{
"name": "C#",
"bytes": "8440"
},
{
"name": "C++",
"bytes": "2005"
},
{
"name": "CSS",
"bytes": "84389"
},
{
"name": "F#",
"bytes": "2310"
},
{
"name": "Forth",
"bytes": "506"
},
{
"name": "GLSL",
"bytes": "1040"
},
{
"name": "Groff",
"bytes": "21"
},
{
"name": "HTML",
"bytes": "140331"
},
{
"name": "JavaScript",
"bytes": "5048"
},
{
"name": "Makefile",
"bytes": "895"
},
{
"name": "Mask",
"bytes": "969"
},
{
"name": "PLSQL",
"bytes": "22886"
},
{
"name": "Python",
"bytes": "24514656"
},
{
"name": "R",
"bytes": "2528"
},
{
"name": "Shell",
"bytes": "7175"
},
{
"name": "Tcl",
"bytes": "2150885"
},
{
"name": "Visual Basic",
"bytes": "529"
}
],
"symlink_target": ""
} |
from __future__ import print_function
import argparse
import unittest
from biggraphite import metric as bg_metric
from biggraphite import settings as bg_settings
from biggraphite.cli import command_list
from tests import test_utils as bg_test_utils
class TestCommandList(bg_test_utils.TestCaseWithFakeAccessor):
def test_run(self):
self.accessor.drop_all_metrics()
cmd = command_list.CommandList()
parser = argparse.ArgumentParser()
bg_settings.add_argparse_arguments(parser)
cmd.add_arguments(parser)
name = "foo.bar"
metadata = bg_metric.MetricMetadata(
retention=bg_metric.Retention.from_string("1440*60s")
)
self.accessor.create_metric(bg_test_utils.make_metric(name, metadata))
opts = parser.parse_args(["foo.*"])
cmd.run(self.accessor, opts)
metrics = command_list.list_metrics(self.accessor, opts.glob, opts.graphite)
self.assertEqual(name, list(metrics)[0].name)
opts = parser.parse_args(["--graphite", "foo.{bar}"])
metrics = command_list.list_metrics(self.accessor, opts.glob, opts.graphite)
self.assertEqual(name, list(metrics)[0].name)
if __name__ == "__main__":
unittest.main()
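# Hedged aside (not part of the original test): the retention string used above
# follows the carbon-style "<points>*<precision>" form, so "1440*60s" is a
# single stage of 1440 points at 60-second resolution, i.e. one day of
# per-minute data. The helper name is hypothetical.
def _example_retention():
    return bg_metric.Retention.from_string("1440*60s")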
| {
"content_hash": "ecc96ca3f51b8682aeb7194a5215a1b5",
"timestamp": "",
"source": "github",
"line_count": 40,
"max_line_length": 84,
"avg_line_length": 31.275,
"alnum_prop": 0.6738609112709832,
"repo_name": "iksaif/biggraphite",
"id": "f5c49ce693d9f57fc039b7de5479b623fcd23c56",
"size": "1843",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/cli/test_command_list.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "4291"
},
{
"name": "Java",
"bytes": "51088"
},
{
"name": "Python",
"bytes": "655048"
},
{
"name": "Shell",
"bytes": "8862"
}
],
"symlink_target": ""
} |
import logging
from pyvisdk.exceptions import InvalidArgumentError
########################################
# Automatically generated, do not edit.
########################################
log = logging.getLogger(__name__)
def StoragePlacementSpec(vim, *args, **kwargs):
'''StoragePlacementSpec encapsulates all of the inputs passed to the
    RecommendDatastores method. NOTE: This data object type and all of its methods
are experimental and subject to change in future releases.'''
obj = vim.client.factory.create('ns0:StoragePlacementSpec')
# do some validation checking...
if (len(args) + len(kwargs)) < 2:
        raise IndexError('Expected at least 2 arguments, got: %d' % len(args))
required = [ 'podSelectionSpec', 'type' ]
optional = [ 'cloneName', 'cloneSpec', 'configSpec', 'folder', 'host', 'priority',
'relocateSpec', 'resourcePool', 'vm', 'dynamicProperty', 'dynamicType' ]
for name, arg in zip(required+optional, args):
setattr(obj, name, arg)
for name, value in kwargs.items():
if name in required + optional:
setattr(obj, name, value)
else:
raise InvalidArgumentError("Invalid argument: %s. Expected one of %s" % (name, ", ".join(required + optional)))
return obj
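# Hedged usage sketch (not part of the generated module): building the spec
# needs a connected pyvisdk `vim` handle plus the two required arguments in
# order (podSelectionSpec, type); optional fields go as keywords. The 'clone'
# type string and clone name below are illustrative assumptions.
def _example_spec(vim, pod_selection_spec):
    return StoragePlacementSpec(vim, pod_selection_spec, 'clone',
                                cloneName='example-clone')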
| {
"content_hash": "8e6c50782fe5c6bab0f6b2fef1788f84",
"timestamp": "",
"source": "github",
"line_count": 35,
"max_line_length": 124,
"avg_line_length": 36.97142857142857,
"alnum_prop": 0.625193199381762,
"repo_name": "xuru/pyvisdk",
"id": "4bfa028530abc8ea8f957f7c5b30e9f56ba0fdb5",
"size": "1295",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pyvisdk/do/storage_placement_spec.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "369"
},
{
"name": "Python",
"bytes": "3037849"
},
{
"name": "Shell",
"bytes": "4517"
}
],
"symlink_target": ""
} |
class RegionStub:
def end(self):
return 'dummy region end'
def empty(self):
return False
class EmptyRegionStub:
def __init__(self):
self.dummy_region_end = 'dummy region end'
def end(self):
return self.dummy_region_end
def empty(self):
return True
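# Minimal sketch (not part of the original stubs) of how a test might consume
# them: code under test only ever calls end()/empty(), so these doubles stand
# in for a real editor region. The helper below is hypothetical.
def _describe(region):
    return 'empty region' if region.empty() else 'region ending at ' + region.end()

assert _describe(RegionStub()) == 'region ending at dummy region end'
assert _describe(EmptyRegionStub()) == 'empty region'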
| {
"content_hash": "00c6326c34982cad63ba7d920d67b3e2",
"timestamp": "",
"source": "github",
"line_count": 17,
"max_line_length": 50,
"avg_line_length": 18.41176470588235,
"alnum_prop": 0.597444089456869,
"repo_name": "ldgit/hours-calculator",
"id": "ce7fa2c7491ad20795b2fe650fe37e00a4e12128",
"size": "313",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/region_stub.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "17501"
}
],
"symlink_target": ""
} |
import copy
import sys
import os
import operator
import shlex
import warnings
import heapq
import bisect
import random
from subprocess import Popen, PIPE
from threading import Thread
from collections import defaultdict
from itertools import chain
from functools import reduce
from math import sqrt, log, isinf, isnan, pow, ceil
from typing import (
Any,
Callable,
Dict,
Generic,
Hashable,
Iterable,
Iterator,
IO,
List,
NoReturn,
Optional,
Sequence,
Tuple,
Union,
TypeVar,
cast,
overload,
TYPE_CHECKING,
)
from pyspark.java_gateway import local_connect_and_auth
from pyspark.serializers import (
AutoBatchedSerializer,
BatchedSerializer,
NoOpSerializer,
CartesianDeserializer,
CloudPickleSerializer,
PairDeserializer,
CPickleSerializer,
Serializer,
pack_long,
read_int,
write_int,
)
from pyspark.join import (
python_join,
python_left_outer_join,
python_right_outer_join,
python_full_outer_join,
python_cogroup,
)
from pyspark.statcounter import StatCounter
from pyspark.rddsampler import RDDSampler, RDDRangeSampler, RDDStratifiedSampler
from pyspark.storagelevel import StorageLevel
from pyspark.resource.requests import ExecutorResourceRequests, TaskResourceRequests
from pyspark.resource.profile import ResourceProfile
from pyspark.resultiterable import ResultIterable
from pyspark.shuffle import (
Aggregator,
ExternalMerger,
get_used_memory,
ExternalSorter,
ExternalGroupBy,
)
from pyspark.traceback_utils import SCCallSiteSync
from pyspark.util import fail_on_stopiteration, _parse_memory
if TYPE_CHECKING:
import socket
import io
from pyspark._typing import NonUDFType
from pyspark._typing import S, NumberOrArray
from pyspark.context import SparkContext
from pyspark.sql.pandas._typing import (
PandasScalarUDFType,
PandasGroupedMapUDFType,
PandasGroupedAggUDFType,
PandasWindowAggUDFType,
PandasScalarIterUDFType,
PandasMapIterUDFType,
PandasCogroupedMapUDFType,
ArrowMapIterUDFType,
PandasGroupedMapUDFWithStateType,
)
from pyspark.sql.dataframe import DataFrame
from pyspark.sql.types import AtomicType, StructType
from pyspark.sql._typing import AtomicValue, RowLike, SQLBatchedUDFType
from py4j.java_gateway import JavaObject
from py4j.java_collections import JavaArray
T = TypeVar("T")
T_co = TypeVar("T_co", covariant=True)
U = TypeVar("U")
K = TypeVar("K", bound=Hashable)
V = TypeVar("V")
V1 = TypeVar("V1")
V2 = TypeVar("V2")
V3 = TypeVar("V3")
__all__ = ["RDD"]
class PythonEvalType:
"""
Evaluation type of python rdd.
These values are internal to PySpark.
These values should match values in org.apache.spark.api.python.PythonEvalType.
"""
NON_UDF: "NonUDFType" = 0
SQL_BATCHED_UDF: "SQLBatchedUDFType" = 100
SQL_SCALAR_PANDAS_UDF: "PandasScalarUDFType" = 200
SQL_GROUPED_MAP_PANDAS_UDF: "PandasGroupedMapUDFType" = 201
SQL_GROUPED_AGG_PANDAS_UDF: "PandasGroupedAggUDFType" = 202
SQL_WINDOW_AGG_PANDAS_UDF: "PandasWindowAggUDFType" = 203
SQL_SCALAR_PANDAS_ITER_UDF: "PandasScalarIterUDFType" = 204
SQL_MAP_PANDAS_ITER_UDF: "PandasMapIterUDFType" = 205
SQL_COGROUPED_MAP_PANDAS_UDF: "PandasCogroupedMapUDFType" = 206
SQL_MAP_ARROW_ITER_UDF: "ArrowMapIterUDFType" = 207
SQL_GROUPED_MAP_PANDAS_UDF_WITH_STATE: "PandasGroupedMapUDFWithStateType" = 208
def portable_hash(x: Hashable) -> int:
"""
This function returns consistent hash code for builtin types, especially
for None and tuple with None.
The algorithm is similar to that one used by CPython 2.7
Examples
--------
>>> portable_hash(None)
0
>>> portable_hash((None, 1)) & 0xffffffff
219750521
"""
if "PYTHONHASHSEED" not in os.environ:
raise RuntimeError("Randomness of hash of string should be disabled via PYTHONHASHSEED")
if x is None:
return 0
if isinstance(x, tuple):
h = 0x345678
for i in x:
h ^= portable_hash(i)
h *= 1000003
h &= sys.maxsize
h ^= len(x)
if h == -1:
h = -2
return int(h)
return hash(x)
class BoundedFloat(float):
"""
Bounded value is generated by approximate job, with confidence and low
bound and high bound.
Examples
--------
>>> BoundedFloat(100.0, 0.95, 95.0, 105.0)
100.0
"""
confidence: float
low: float
high: float
def __new__(cls, mean: float, confidence: float, low: float, high: float) -> "BoundedFloat":
obj = float.__new__(cls, mean)
obj.confidence = confidence
obj.low = low
obj.high = high
return obj
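# A small illustration (not part of the original source): the extra attributes
# ride along with the plain float value that approximate actions such as
# RDD.sumApprox / meanApprox return.
def _example_bounded_float() -> Tuple[float, float, Tuple[float, float]]:
    estimate = BoundedFloat(100.0, 0.95, 95.0, 105.0)
    # behaves like a float in arithmetic, but keeps its confidence interval
    return estimate + 1, estimate.confidence, (estimate.low, estimate.high)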
def _create_local_socket(sock_info: "JavaArray") -> "io.BufferedRWPair":
"""
Create a local socket that can be used to load deserialized data from the JVM
Parameters
----------
sock_info : tuple
Tuple containing port number and authentication secret for a local socket.
Returns
-------
sockfile file descriptor of the local socket
"""
sockfile: "io.BufferedRWPair"
sock: "socket.socket"
port: int = sock_info[0]
auth_secret: str = sock_info[1]
sockfile, sock = local_connect_and_auth(port, auth_secret)
# The RDD materialization time is unpredictable, if we set a timeout for socket reading
# operation, it will very possibly fail. See SPARK-18281.
sock.settimeout(None)
return sockfile
def _load_from_socket(sock_info: "JavaArray", serializer: Serializer) -> Iterator[Any]:
"""
Connect to a local socket described by sock_info and use the given serializer to yield data
Parameters
----------
sock_info : tuple
Tuple containing port number and authentication secret for a local socket.
serializer : class:`Serializer`
The PySpark serializer to use
Returns
-------
result of meth:`Serializer.load_stream`,
usually a generator that yields deserialized data
"""
sockfile = _create_local_socket(sock_info)
# The socket will be automatically closed when garbage-collected.
return serializer.load_stream(sockfile)
def _local_iterator_from_socket(sock_info: "JavaArray", serializer: Serializer) -> Iterator[Any]:
class PyLocalIterable:
"""Create a synchronous local iterable over a socket"""
def __init__(self, _sock_info: "JavaArray", _serializer: Serializer):
port: int
auth_secret: str
jsocket_auth_server: "JavaObject"
port, auth_secret, self.jsocket_auth_server = _sock_info
self._sockfile = _create_local_socket((port, auth_secret))
self._serializer = _serializer
self._read_iter: Iterator[Any] = iter([]) # Initialize as empty iterator
self._read_status = 1
def __iter__(self) -> Iterator[Any]:
while self._read_status == 1:
# Request next partition data from Java
write_int(1, self._sockfile)
self._sockfile.flush()
# If response is 1 then there is a partition to read, if 0 then fully consumed
self._read_status = read_int(self._sockfile)
if self._read_status == 1:
# Load the partition data as a stream and read each item
self._read_iter = self._serializer.load_stream(self._sockfile)
for item in self._read_iter:
yield item
# An error occurred, join serving thread and raise any exceptions from the JVM
elif self._read_status == -1:
self.jsocket_auth_server.getResult()
def __del__(self) -> None:
# If local iterator is not fully consumed,
if self._read_status == 1:
try:
# Finish consuming partition data stream
for _ in self._read_iter:
pass
# Tell Java to stop sending data and close connection
write_int(0, self._sockfile)
self._sockfile.flush()
except Exception:
# Ignore any errors, socket is automatically closed when garbage-collected
pass
return iter(PyLocalIterable(sock_info, serializer))
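# Illustrative sketch (not part of the original source): the local-iterator
# handshake above rides on the same 4-byte big-endian framing that
# write_int/read_int provide; this stands alone on an in-memory buffer instead
# of the real JVM socket.
def _example_int_framing() -> Tuple[int, int]:
    import io

    buf = io.BytesIO()
    write_int(1, buf)  # "send me the next partition"
    write_int(0, buf)  # "fully consumed, stop serving"
    buf.seek(0)
    return read_int(buf), read_int(buf)  # (1, 0)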
class Partitioner:
def __init__(self, numPartitions: int, partitionFunc: Callable[[Any], int]):
self.numPartitions = numPartitions
self.partitionFunc = partitionFunc
def __eq__(self, other: Any) -> bool:
return (
isinstance(other, Partitioner)
and self.numPartitions == other.numPartitions
and self.partitionFunc == other.partitionFunc
)
def __call__(self, k: Any) -> int:
return self.partitionFunc(k) % self.numPartitions
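# A short sketch (not part of the original source): how the default hash
# partitioner maps keys to partitions. Calling portable_hash requires
# PYTHONHASHSEED to be set, as it is in a normal PySpark worker; the key values
# are arbitrary examples.
def _example_partitioner() -> Tuple[int, int, int]:
    p = Partitioner(4, portable_hash)
    # the same key always lands in the same partition; None hashes to 0
    return p("spark"), p("spark"), p(None)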
class RDD(Generic[T_co]):
"""
A Resilient Distributed Dataset (RDD), the basic abstraction in Spark.
Represents an immutable, partitioned collection of elements that can be
operated on in parallel.
"""
def __init__(
self,
jrdd: "JavaObject",
ctx: "SparkContext",
jrdd_deserializer: Serializer = AutoBatchedSerializer(CPickleSerializer()),
):
self._jrdd = jrdd
self.is_cached = False
self.is_checkpointed = False
self.has_resource_profile = False
self.ctx = ctx
self._jrdd_deserializer = jrdd_deserializer
self._id = jrdd.id()
self.partitioner: Optional[Partitioner] = None
def _pickled(self: "RDD[T]") -> "RDD[T]":
return self._reserialize(AutoBatchedSerializer(CPickleSerializer()))
def id(self) -> int:
"""
A unique ID for this RDD (within its SparkContext).
.. versionadded:: 0.7.0
Returns
-------
int
The unique ID for this :class:`RDD`
Examples
--------
>>> rdd = sc.range(5)
>>> rdd.id() # doctest: +SKIP
3
"""
return self._id
def __repr__(self) -> str:
return self._jrdd.toString()
def __getnewargs__(self) -> NoReturn:
# This method is called when attempting to pickle an RDD, which is always an error:
raise RuntimeError(
"It appears that you are attempting to broadcast an RDD or reference an RDD from an "
"action or transformation. RDD transformations and actions can only be invoked by the "
"driver, not inside of other transformations; for example, "
"rdd1.map(lambda x: rdd2.values.count() * x) is invalid because the values "
"transformation and count action cannot be performed inside of the rdd1.map "
"transformation. For more information, see SPARK-5063."
)
@property
def context(self) -> "SparkContext":
"""
The :class:`SparkContext` that this RDD was created on.
.. versionadded:: 0.7.0
Returns
-------
:class:`SparkContext`
The :class:`SparkContext` that this RDD was created on
Examples
--------
>>> rdd = sc.range(5)
>>> rdd.context
<SparkContext ...>
>>> rdd.context is sc
True
"""
return self.ctx
def cache(self: "RDD[T]") -> "RDD[T]":
"""
Persist this RDD with the default storage level (`MEMORY_ONLY`).
.. versionadded:: 0.7.0
Returns
-------
:class:`RDD`
The same :class:`RDD` with storage level set to `MEMORY_ONLY`
See Also
--------
:meth:`RDD.persist`
:meth:`RDD.unpersist`
:meth:`RDD.getStorageLevel`
Examples
--------
>>> rdd = sc.range(5)
>>> rdd2 = rdd.cache()
>>> rdd2 is rdd
True
>>> str(rdd.getStorageLevel())
'Memory Serialized 1x Replicated'
>>> _ = rdd.unpersist()
"""
self.is_cached = True
self.persist(StorageLevel.MEMORY_ONLY)
return self
def persist(self: "RDD[T]", storageLevel: StorageLevel = StorageLevel.MEMORY_ONLY) -> "RDD[T]":
"""
Set this RDD's storage level to persist its values across operations
after the first time it is computed. This can only be used to assign
a new storage level if the RDD does not have a storage level set yet.
If no storage level is specified defaults to (`MEMORY_ONLY`).
.. versionadded:: 0.9.1
Parameters
----------
storageLevel : :class:`StorageLevel`, default `MEMORY_ONLY`
the target storage level
Returns
-------
:class:`RDD`
The same :class:`RDD` with storage level set to `storageLevel`.
See Also
--------
:meth:`RDD.cache`
:meth:`RDD.unpersist`
:meth:`RDD.getStorageLevel`
Examples
--------
>>> rdd = sc.parallelize(["b", "a", "c"])
>>> rdd.persist().is_cached
True
>>> str(rdd.getStorageLevel())
'Memory Serialized 1x Replicated'
>>> _ = rdd.unpersist()
>>> rdd.is_cached
False
>>> from pyspark import StorageLevel
>>> rdd2 = sc.range(5)
>>> _ = rdd2.persist(StorageLevel.MEMORY_AND_DISK)
>>> rdd2.is_cached
True
>>> str(rdd2.getStorageLevel())
'Disk Memory Serialized 1x Replicated'
Can not override existing storage level
>>> _ = rdd2.persist(StorageLevel.MEMORY_ONLY_2)
Traceback (most recent call last):
...
py4j.protocol.Py4JJavaError: ...
Assign another storage level after `unpersist`
>>> _ = rdd2.unpersist()
>>> rdd2.is_cached
False
>>> _ = rdd2.persist(StorageLevel.MEMORY_ONLY_2)
>>> str(rdd2.getStorageLevel())
'Memory Serialized 2x Replicated'
>>> rdd2.is_cached
True
>>> _ = rdd2.unpersist()
"""
self.is_cached = True
javaStorageLevel = self.ctx._getJavaStorageLevel(storageLevel)
self._jrdd.persist(javaStorageLevel)
return self
def unpersist(self: "RDD[T]", blocking: bool = False) -> "RDD[T]":
"""
Mark the RDD as non-persistent, and remove all blocks for it from
memory and disk.
.. versionadded:: 0.9.1
Parameters
----------
blocking : bool, optional, default False
whether to block until all blocks are deleted
.. versionadded:: 3.0.0
Returns
-------
:class:`RDD`
The same :class:`RDD`
See Also
--------
:meth:`RDD.cache`
:meth:`RDD.persist`
:meth:`RDD.getStorageLevel`
Examples
--------
>>> rdd = sc.range(5)
>>> rdd.is_cached
False
>>> _ = rdd.unpersist()
>>> rdd.is_cached
False
>>> _ = rdd.cache()
>>> rdd.is_cached
True
>>> _ = rdd.unpersist()
>>> rdd.is_cached
False
>>> _ = rdd.unpersist()
"""
self.is_cached = False
self._jrdd.unpersist(blocking)
return self
def checkpoint(self) -> None:
"""
Mark this RDD for checkpointing. It will be saved to a file inside the
checkpoint directory set with :meth:`SparkContext.setCheckpointDir` and
all references to its parent RDDs will be removed. This function must
be called before any job has been executed on this RDD. It is strongly
recommended that this RDD is persisted in memory, otherwise saving it
on a file will require recomputation.
.. versionadded:: 0.7.0
See Also
--------
:meth:`RDD.isCheckpointed`
:meth:`RDD.getCheckpointFile`
:meth:`RDD.localCheckpoint`
:meth:`SparkContext.setCheckpointDir`
:meth:`SparkContext.getCheckpointDir`
Examples
--------
>>> rdd = sc.range(5)
>>> rdd.is_checkpointed
False
>>> rdd.getCheckpointFile() == None
True
>>> rdd.checkpoint()
>>> rdd.is_checkpointed
True
>>> rdd.getCheckpointFile() == None
True
>>> rdd.count()
5
>>> rdd.is_checkpointed
True
>>> rdd.getCheckpointFile() == None
False
"""
self.is_checkpointed = True
self._jrdd.rdd().checkpoint()
def isCheckpointed(self) -> bool:
"""
Return whether this RDD is checkpointed and materialized, either reliably or locally.
.. versionadded:: 0.7.0
Returns
-------
bool
whether this :class:`RDD` is checkpointed and materialized, either reliably or locally
See Also
--------
:meth:`RDD.checkpoint`
:meth:`RDD.getCheckpointFile`
:meth:`SparkContext.setCheckpointDir`
:meth:`SparkContext.getCheckpointDir`
"""
return self._jrdd.rdd().isCheckpointed()
def localCheckpoint(self) -> None:
"""
Mark this RDD for local checkpointing using Spark's existing caching layer.
This method is for users who wish to truncate RDD lineages while skipping the expensive
step of replicating the materialized data in a reliable distributed file system. This is
useful for RDDs with long lineages that need to be truncated periodically (e.g. GraphX).
Local checkpointing sacrifices fault-tolerance for performance. In particular, checkpointed
data is written to ephemeral local storage in the executors instead of to a reliable,
fault-tolerant storage. The effect is that if an executor fails during the computation,
the checkpointed data may no longer be accessible, causing an irrecoverable job failure.
This is NOT safe to use with dynamic allocation, which removes executors along
with their cached blocks. If you must use both features, you are advised to set
`spark.dynamicAllocation.cachedExecutorIdleTimeout` to a high value.
The checkpoint directory set through :meth:`SparkContext.setCheckpointDir` is not used.
.. versionadded:: 2.2.0
See Also
--------
:meth:`RDD.checkpoint`
:meth:`RDD.isLocallyCheckpointed`
Examples
--------
>>> rdd = sc.range(5)
>>> rdd.isLocallyCheckpointed()
False
>>> rdd.localCheckpoint()
>>> rdd.isLocallyCheckpointed()
True
"""
self._jrdd.rdd().localCheckpoint()
def isLocallyCheckpointed(self) -> bool:
"""
Return whether this RDD is marked for local checkpointing.
Exposed for testing.
.. versionadded:: 2.2.0
Returns
-------
bool
whether this :class:`RDD` is marked for local checkpointing
See Also
--------
:meth:`RDD.localCheckpoint`
"""
return self._jrdd.rdd().isLocallyCheckpointed()
def getCheckpointFile(self) -> Optional[str]:
"""
Gets the name of the file to which this RDD was checkpointed
Not defined if RDD is checkpointed locally.
.. versionadded:: 0.7.0
Returns
-------
str
the name of the file to which this :class:`RDD` was checkpointed
See Also
--------
:meth:`RDD.checkpoint`
:meth:`SparkContext.setCheckpointDir`
:meth:`SparkContext.getCheckpointDir`
"""
checkpointFile = self._jrdd.rdd().getCheckpointFile()
return checkpointFile.get() if checkpointFile.isDefined() else None
def cleanShuffleDependencies(self, blocking: bool = False) -> None:
"""
        Removes an RDD's shuffles and its non-persisted ancestors.
When running without a shuffle service, cleaning up shuffle files enables downscaling.
If you use the RDD after this call, you should checkpoint and materialize it first.
.. versionadded:: 3.3.0
Parameters
----------
blocking : bool, optional, default False
whether to block on shuffle cleanup tasks
Notes
-----
This API is a developer API.
"""
self._jrdd.rdd().cleanShuffleDependencies(blocking)
def map(self: "RDD[T]", f: Callable[[T], U], preservesPartitioning: bool = False) -> "RDD[U]":
"""
Return a new RDD by applying a function to each element of this RDD.
.. versionadded:: 0.7.0
Parameters
----------
f : function
a function to run on each element of the RDD
preservesPartitioning : bool, optional, default False
indicates whether the input function preserves the partitioner,
            which should be False unless this is a pair RDD and the input
            function doesn't modify the keys.
Returns
-------
:class:`RDD`
a new :class:`RDD` by applying a function to all elements
See Also
--------
:meth:`RDD.flatMap`
:meth:`RDD.mapPartitions`
:meth:`RDD.mapPartitionsWithIndex`
:meth:`RDD.mapPartitionsWithSplit`
Examples
--------
>>> rdd = sc.parallelize(["b", "a", "c"])
>>> sorted(rdd.map(lambda x: (x, 1)).collect())
[('a', 1), ('b', 1), ('c', 1)]
"""
def func(_: int, iterator: Iterable[T]) -> Iterable[U]:
return map(fail_on_stopiteration(f), iterator)
return self.mapPartitionsWithIndex(func, preservesPartitioning)
def flatMap(
self: "RDD[T]", f: Callable[[T], Iterable[U]], preservesPartitioning: bool = False
) -> "RDD[U]":
"""
Return a new RDD by first applying a function to all elements of this
RDD, and then flattening the results.
.. versionadded:: 0.7.0
Parameters
----------
f : function
            a function to turn a T into a sequence of U
preservesPartitioning : bool, optional, default False
indicates whether the input function preserves the partitioner,
            which should be False unless this is a pair RDD and the input
            function doesn't modify the keys.
Returns
-------
:class:`RDD`
a new :class:`RDD` by applying a function to all elements
See Also
--------
:meth:`RDD.map`
:meth:`RDD.mapPartitions`
:meth:`RDD.mapPartitionsWithIndex`
:meth:`RDD.mapPartitionsWithSplit`
Examples
--------
>>> rdd = sc.parallelize([2, 3, 4])
>>> sorted(rdd.flatMap(lambda x: range(1, x)).collect())
[1, 1, 1, 2, 2, 3]
>>> sorted(rdd.flatMap(lambda x: [(x, x), (x, x)]).collect())
[(2, 2), (2, 2), (3, 3), (3, 3), (4, 4), (4, 4)]
"""
def func(_: int, iterator: Iterable[T]) -> Iterable[U]:
return chain.from_iterable(map(fail_on_stopiteration(f), iterator))
return self.mapPartitionsWithIndex(func, preservesPartitioning)
def mapPartitions(
self: "RDD[T]", f: Callable[[Iterable[T]], Iterable[U]], preservesPartitioning: bool = False
) -> "RDD[U]":
"""
Return a new RDD by applying a function to each partition of this RDD.
.. versionadded:: 0.7.0
Parameters
----------
f : function
a function to run on each partition of the RDD
preservesPartitioning : bool, optional, default False
indicates whether the input function preserves the partitioner,
            which should be False unless this is a pair RDD and the input
            function doesn't modify the keys.
Returns
-------
:class:`RDD`
a new :class:`RDD` by applying a function to each partition
See Also
--------
:meth:`RDD.map`
:meth:`RDD.flatMap`
:meth:`RDD.mapPartitionsWithIndex`
:meth:`RDD.mapPartitionsWithSplit`
:meth:`RDDBarrier.mapPartitions`
Examples
--------
>>> rdd = sc.parallelize([1, 2, 3, 4], 2)
>>> def f(iterator): yield sum(iterator)
>>> rdd.mapPartitions(f).collect()
[3, 7]
"""
def func(_: int, iterator: Iterable[T]) -> Iterable[U]:
return f(iterator)
return self.mapPartitionsWithIndex(func, preservesPartitioning)
def mapPartitionsWithIndex(
self: "RDD[T]",
f: Callable[[int, Iterable[T]], Iterable[U]],
preservesPartitioning: bool = False,
) -> "RDD[U]":
"""
Return a new RDD by applying a function to each partition of this RDD,
while tracking the index of the original partition.
.. versionadded:: 0.7.0
Parameters
----------
f : function
a function to run on each partition of the RDD
preservesPartitioning : bool, optional, default False
indicates whether the input function preserves the partitioner,
which should be False unless this is a pair RDD and the input
function does not modify the keys
Returns
-------
:class:`RDD`
a new :class:`RDD` by applying a function to each partition
See Also
--------
:meth:`RDD.map`
:meth:`RDD.flatMap`
:meth:`RDD.mapPartitions`
:meth:`RDD.mapPartitionsWithSplit`
:meth:`RDDBarrier.mapPartitionsWithIndex`
Examples
--------
>>> rdd = sc.parallelize([1, 2, 3, 4], 4)
>>> def f(splitIndex, iterator): yield splitIndex
>>> rdd.mapPartitionsWithIndex(f).sum()
6
"""
return PipelinedRDD(self, f, preservesPartitioning)
def mapPartitionsWithSplit(
self: "RDD[T]",
f: Callable[[int, Iterable[T]], Iterable[U]],
preservesPartitioning: bool = False,
) -> "RDD[U]":
"""
Return a new RDD by applying a function to each partition of this RDD,
while tracking the index of the original partition.
.. versionadded:: 0.7.0
.. deprecated:: 0.9.0
Use :meth:`RDD.mapPartitionsWithIndex` instead.
Parameters
----------
f : function
a function to run on each partition of the RDD
preservesPartitioning : bool, optional, default False
indicates whether the input function preserves the partitioner,
which should be False unless this is a pair RDD and the input
function does not modify the keys
Returns
-------
:class:`RDD`
a new :class:`RDD` by applying a function to each partition
See Also
--------
:meth:`RDD.map`
:meth:`RDD.flatMap`
:meth:`RDD.mapPartitions`
:meth:`RDD.mapPartitionsWithIndex`
Examples
--------
>>> rdd = sc.parallelize([1, 2, 3, 4], 4)
>>> def f(splitIndex, iterator): yield splitIndex
>>> rdd.mapPartitionsWithSplit(f).sum()
6
"""
warnings.warn(
"mapPartitionsWithSplit is deprecated; use mapPartitionsWithIndex instead",
FutureWarning,
stacklevel=2,
)
return self.mapPartitionsWithIndex(f, preservesPartitioning)
def getNumPartitions(self) -> int:
"""
Returns the number of partitions in this RDD
.. versionadded:: 1.1.0
Returns
-------
int
number of partitions
Examples
--------
>>> rdd = sc.parallelize([1, 2, 3, 4], 2)
>>> rdd.getNumPartitions()
2
"""
return self._jrdd.partitions().size()
def filter(self: "RDD[T]", f: Callable[[T], bool]) -> "RDD[T]":
"""
Return a new RDD containing only the elements that satisfy a predicate.
.. versionadded:: 0.7.0
Parameters
----------
f : function
a function to run on each element of the RDD
Returns
-------
:class:`RDD`
a new :class:`RDD` by applying a function to each element
See Also
--------
:meth:`RDD.map`
Examples
--------
>>> rdd = sc.parallelize([1, 2, 3, 4, 5])
>>> rdd.filter(lambda x: x % 2 == 0).collect()
[2, 4]
"""
def func(iterator: Iterable[T]) -> Iterable[T]:
return filter(fail_on_stopiteration(f), iterator)
return self.mapPartitions(func, True)
def distinct(self: "RDD[T]", numPartitions: Optional[int] = None) -> "RDD[T]":
"""
Return a new RDD containing the distinct elements in this RDD.
.. versionadded:: 0.7.0
Parameters
----------
numPartitions : int, optional
the number of partitions in new :class:`RDD`
Returns
-------
:class:`RDD`
a new :class:`RDD` containing the distinct elements
See Also
--------
:meth:`RDD.countApproxDistinct`
Examples
--------
>>> sorted(sc.parallelize([1, 1, 2, 3]).distinct().collect())
[1, 2, 3]
"""
return (
self.map(lambda x: (x, None))
.reduceByKey(lambda x, _: x, numPartitions)
.map(lambda x: x[0])
)
def sample(
self: "RDD[T]", withReplacement: bool, fraction: float, seed: Optional[int] = None
) -> "RDD[T]":
"""
Return a sampled subset of this RDD.
.. versionadded:: 0.7.0
Parameters
----------
withReplacement : bool
can elements be sampled multiple times (replaced when sampled out)
fraction : float
expected size of the sample as a fraction of this RDD's size
without replacement: probability that each element is chosen; fraction must be in [0, 1]
with replacement: expected number of times each element is chosen; fraction must be >= 0
seed : int, optional
seed for the random number generator
Returns
-------
:class:`RDD`
a new :class:`RDD` containing a sampled subset of elements
See Also
--------
:meth:`RDD.takeSample`
:meth:`RDD.sampleByKey`
:meth:`pyspark.sql.DataFrame.sample`
Notes
-----
This is not guaranteed to provide exactly the fraction specified of the total
count of the given :class:`RDD`.
Examples
--------
>>> rdd = sc.parallelize(range(100), 4)
>>> 6 <= rdd.sample(False, 0.1, 81).count() <= 14
True
"""
if not fraction >= 0:
raise ValueError("Fraction must be nonnegative.")
return self.mapPartitionsWithIndex(RDDSampler(withReplacement, fraction, seed).func, True)
def randomSplit(
self: "RDD[T]", weights: Sequence[Union[int, float]], seed: Optional[int] = None
) -> "List[RDD[T]]":
"""
Randomly splits this RDD with the provided weights.
.. versionadded:: 1.3.0
Parameters
----------
weights : list
weights for splits, will be normalized if they don't sum to 1
seed : int, optional
random seed
Returns
-------
list
split :class:`RDD`\\s in a list
See Also
--------
:meth:`pyspark.sql.DataFrame.randomSplit`
Examples
--------
>>> rdd = sc.parallelize(range(500), 1)
>>> rdd1, rdd2 = rdd.randomSplit([2, 3], 17)
>>> len(rdd1.collect() + rdd2.collect())
500
>>> 150 < rdd1.count() < 250
True
>>> 250 < rdd2.count() < 350
True
"""
if not all(w >= 0 for w in weights):
raise ValueError("Weights must be nonnegative")
s = float(sum(weights))
if not s > 0:
raise ValueError("Sum of weights must be positive")
cweights = [0.0]
for w in weights:
cweights.append(cweights[-1] + w / s)
if seed is None:
seed = random.randint(0, 2**32 - 1)
return [
self.mapPartitionsWithIndex(RDDRangeSampler(lb, ub, seed).func, True)
for lb, ub in zip(cweights, cweights[1:])
]
# this is ported from scala/spark/RDD.scala
def takeSample(
self: "RDD[T]", withReplacement: bool, num: int, seed: Optional[int] = None
) -> List[T]:
"""
Return a fixed-size sampled subset of this RDD.
.. versionadded:: 1.3.0
Parameters
----------
withReplacement : bool
whether sampling is done with replacement
num : int
size of the returned sample
seed : int, optional
random seed
Returns
-------
list
a fixed-size sampled subset of this :class:`RDD` in an array
See Also
--------
:meth:`RDD.sample`
Notes
-----
This method should only be used if the resulting array is expected
to be small, as all the data is loaded into the driver's memory.
Examples
--------
>>> import sys
>>> rdd = sc.parallelize(range(0, 10))
>>> len(rdd.takeSample(True, 20, 1))
20
>>> len(rdd.takeSample(False, 5, 2))
5
>>> len(rdd.takeSample(False, 15, 3))
10
>>> sc.range(0, 10).takeSample(False, sys.maxsize)
Traceback (most recent call last):
...
ValueError: Sample size cannot be greater than ...
"""
numStDev = 10.0
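# Cap the requested sample size roughly 10 standard deviations below sys.maxsize,
# presumably so the oversampled count computed below cannot overflow.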
maxSampleSize = sys.maxsize - int(numStDev * sqrt(sys.maxsize))
if num < 0:
raise ValueError("Sample size cannot be negative.")
elif num > maxSampleSize:
raise ValueError("Sample size cannot be greater than %d." % maxSampleSize)
if num == 0 or self.getNumPartitions() == 0:
return []
initialCount = self.count()
if initialCount == 0:
return []
rand = random.Random(seed)
if (not withReplacement) and num >= initialCount:
# shuffle current RDD and return
samples = self.collect()
rand.shuffle(samples)
return samples
fraction = RDD._computeFractionForSampleSize(num, initialCount, withReplacement)
samples = self.sample(withReplacement, fraction, seed).collect()
# If the first sample didn't turn out large enough, keep trying to take samples;
# this shouldn't happen often because we use a big multiplier for their initial size.
# See: scala/spark/RDD.scala
while len(samples) < num:
# TODO: add log warning for when more than one iteration was run
seed = rand.randint(0, sys.maxsize)
samples = self.sample(withReplacement, fraction, seed).collect()
rand.shuffle(samples)
return samples[0:num]
@staticmethod
def _computeFractionForSampleSize(
sampleSizeLowerBound: int, total: int, withReplacement: bool
) -> float:
"""
Returns a sampling rate that guarantees a sample of
size >= sampleSizeLowerBound 99.99% of the time.
How the sampling rate is determined:
Let p = num / total, where num is the sample size and total is the
total number of data points in the RDD. We're trying to compute
q > p such that
- when sampling with replacement, we're drawing each data point
with prob_i ~ Pois(q), where we want to guarantee
Pr[s < num] < 0.0001 for s = sum(prob_i for i from 0 to
total), i.e. the failure rate of not having a sufficiently large
sample < 0.0001. Setting q = p + 5 * sqrt(p/total) is sufficient
to guarantee 0.9999 success rate for num > 12, but we need a
slightly larger q (9 empirically determined).
- when sampling without replacement, we're drawing each data point
with prob_i ~ Binomial(total, fraction) and our choice of q
guarantees 1-delta, or 0.9999 success rate, where success rate is
defined the same as in sampling with replacement.
"""
fraction = float(sampleSizeLowerBound) / total
if withReplacement:
numStDev = 5
if sampleSizeLowerBound < 12:
numStDev = 9
return fraction + numStDev * sqrt(fraction / total)
else:
delta = 0.00005
gamma = -log(delta) / total
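# Rough worked example (without replacement): num=100, total=10000 gives
# fraction=0.01 and gamma ~= 9.9e-4, so the returned rate is ~0.0156, i.e.
# roughly 1.5x the naive rate, which makes undersampling very unlikely.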
return min(1, fraction + gamma + sqrt(gamma * gamma + 2 * gamma * fraction))
def union(self: "RDD[T]", other: "RDD[U]") -> "RDD[Union[T, U]]":
"""
Return the union of this RDD and another one.
.. versionadded:: 0.7.0
Parameters
----------
other : :class:`RDD`
another :class:`RDD`
Returns
-------
:class:`RDD`
the union of this :class:`RDD` and another one
See Also
--------
:meth:`SparkContext.union`
:meth:`pyspark.sql.DataFrame.union`
Examples
--------
>>> rdd = sc.parallelize([1, 1, 2, 3])
>>> rdd.union(rdd).collect()
[1, 1, 2, 3, 1, 1, 2, 3]
"""
if self._jrdd_deserializer == other._jrdd_deserializer:
rdd: "RDD[Union[T, U]]" = RDD(
self._jrdd.union(other._jrdd), self.ctx, self._jrdd_deserializer
)
else:
# These RDDs contain data in different serialized formats, so we
# must normalize them to the default serializer.
self_copy = self._reserialize()
other_copy = other._reserialize()
rdd = RDD(self_copy._jrdd.union(other_copy._jrdd), self.ctx, self.ctx.serializer)
if (
self.partitioner == other.partitioner
and self.getNumPartitions() == rdd.getNumPartitions()
):
rdd.partitioner = self.partitioner
return rdd
def intersection(self: "RDD[T]", other: "RDD[T]") -> "RDD[T]":
"""
Return the intersection of this RDD and another one. The output will
not contain any duplicate elements, even if the input RDDs did.
.. versionadded:: 1.0.0
Parameters
----------
other : :class:`RDD`
another :class:`RDD`
Returns
-------
:class:`RDD`
the intersection of this :class:`RDD` and another one
See Also
--------
:meth:`pyspark.sql.DataFrame.intersect`
Notes
-----
This method performs a shuffle internally.
Examples
--------
>>> rdd1 = sc.parallelize([1, 10, 2, 3, 4, 5])
>>> rdd2 = sc.parallelize([1, 6, 2, 3, 7, 8])
>>> rdd1.intersection(rdd2).collect()
[1, 2, 3]
"""
return (
self.map(lambda v: (v, None))
.cogroup(other.map(lambda v: (v, None)))
.filter(lambda k_vs: all(k_vs[1]))
.keys()
)
def _reserialize(self: "RDD[T]", serializer: Optional[Serializer] = None) -> "RDD[T]":
serializer = serializer or self.ctx.serializer
if self._jrdd_deserializer != serializer:
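# An identity map pushes the data through the Python pipeline so that it
# gets re-encoded with the target serializer.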
self = self.map(lambda x: x, preservesPartitioning=True)
self._jrdd_deserializer = serializer
return self
def __add__(self: "RDD[T]", other: "RDD[U]") -> "RDD[Union[T, U]]":
"""
Return the union of this RDD and another one.
Examples
--------
>>> rdd = sc.parallelize([1, 1, 2, 3])
>>> (rdd + rdd).collect()
[1, 1, 2, 3, 1, 1, 2, 3]
"""
if not isinstance(other, RDD):
raise TypeError
return self.union(other)
@overload
def repartitionAndSortWithinPartitions(
self: "RDD[Tuple[S, V]]",
numPartitions: Optional[int] = ...,
partitionFunc: Callable[["S"], int] = ...,
ascending: bool = ...,
) -> "RDD[Tuple[S, V]]":
...
@overload
def repartitionAndSortWithinPartitions(
self: "RDD[Tuple[K, V]]",
numPartitions: Optional[int],
partitionFunc: Callable[[K], int],
ascending: bool,
keyfunc: Callable[[K], "S"],
) -> "RDD[Tuple[K, V]]":
...
@overload
def repartitionAndSortWithinPartitions(
self: "RDD[Tuple[K, V]]",
numPartitions: Optional[int] = ...,
partitionFunc: Callable[[K], int] = ...,
ascending: bool = ...,
*,
keyfunc: Callable[[K], "S"],
) -> "RDD[Tuple[K, V]]":
...
def repartitionAndSortWithinPartitions(
self: "RDD[Tuple[Any, Any]]",
numPartitions: Optional[int] = None,
partitionFunc: Callable[[Any], int] = portable_hash,
ascending: bool = True,
keyfunc: Callable[[Any], Any] = lambda x: x,
) -> "RDD[Tuple[Any, Any]]":
"""
Repartition the RDD according to the given partitioner and, within each resulting partition,
sort records by their keys.
.. versionadded:: 1.2.0
Parameters
----------
numPartitions : int, optional
the number of partitions in new :class:`RDD`
partitionFunc : function, optional, default `portable_hash`
a function to compute the partition index
ascending : bool, optional, default True
sort the keys in ascending or descending order
keyfunc : function, optional, default identity mapping
a function to compute the key
Returns
-------
:class:`RDD`
a new :class:`RDD`
See Also
--------
:meth:`RDD.repartition`
:meth:`RDD.partitionBy`
:meth:`RDD.sortBy`
:meth:`RDD.sortByKey`
Examples
--------
>>> rdd = sc.parallelize([(0, 5), (3, 8), (2, 6), (0, 8), (3, 8), (1, 3)])
>>> rdd2 = rdd.repartitionAndSortWithinPartitions(2, lambda x: x % 2, True)
>>> rdd2.glom().collect()
[[(0, 5), (0, 8), (2, 6)], [(1, 3), (3, 8), (3, 8)]]
"""
if numPartitions is None:
numPartitions = self._defaultReducePartitions()
memory = self._memory_limit()
serializer = self._jrdd_deserializer
def sortPartition(iterator: Iterable[Tuple[K, V]]) -> Iterable[Tuple[K, V]]:
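# Sort each partition with ExternalSorter, which can spill to disk when the
# limit is exceeded; ~10% of the worker memory limit is kept as headroom.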
sort = ExternalSorter(memory * 0.9, serializer).sorted
return iter(sort(iterator, key=lambda k_v: keyfunc(k_v[0]), reverse=(not ascending)))
return self.partitionBy(numPartitions, partitionFunc).mapPartitions(sortPartition, True)
@overload
def sortByKey(
self: "RDD[Tuple[S, V]]",
ascending: bool = ...,
numPartitions: Optional[int] = ...,
) -> "RDD[Tuple[K, V]]":
...
@overload
def sortByKey(
self: "RDD[Tuple[K, V]]",
ascending: bool,
numPartitions: int,
keyfunc: Callable[[K], "S"],
) -> "RDD[Tuple[K, V]]":
...
@overload
def sortByKey(
self: "RDD[Tuple[K, V]]",
ascending: bool = ...,
numPartitions: Optional[int] = ...,
*,
keyfunc: Callable[[K], "S"],
) -> "RDD[Tuple[K, V]]":
...
def sortByKey(
self: "RDD[Tuple[K, V]]",
ascending: Optional[bool] = True,
numPartitions: Optional[int] = None,
keyfunc: Callable[[Any], Any] = lambda x: x,
) -> "RDD[Tuple[K, V]]":
"""
Sorts this RDD, which is assumed to consist of (key, value) pairs.
.. versionadded:: 0.9.1
Parameters
----------
ascending : bool, optional, default True
sort the keys in ascending or descending order
numPartitions : int, optional
the number of partitions in new :class:`RDD`
keyfunc : function, optional, default identity mapping
a function to compute the key
Returns
-------
:class:`RDD`
a new :class:`RDD`
See Also
--------
:meth:`RDD.sortBy`
:meth:`pyspark.sql.DataFrame.sort`
Examples
--------
>>> tmp = [('a', 1), ('b', 2), ('1', 3), ('d', 4), ('2', 5)]
>>> sc.parallelize(tmp).sortByKey().first()
('1', 3)
>>> sc.parallelize(tmp).sortByKey(True, 1).collect()
[('1', 3), ('2', 5), ('a', 1), ('b', 2), ('d', 4)]
>>> sc.parallelize(tmp).sortByKey(True, 2).collect()
[('1', 3), ('2', 5), ('a', 1), ('b', 2), ('d', 4)]
>>> tmp2 = [('Mary', 1), ('had', 2), ('a', 3), ('little', 4), ('lamb', 5)]
>>> tmp2.extend([('whose', 6), ('fleece', 7), ('was', 8), ('white', 9)])
>>> sc.parallelize(tmp2).sortByKey(True, 3, keyfunc=lambda k: k.lower()).collect()
[('a', 3), ('fleece', 7), ('had', 2), ('lamb', 5),...('white', 9), ('whose', 6)]
"""
if numPartitions is None:
numPartitions = self._defaultReducePartitions()
memory = self._memory_limit()
serializer = self._jrdd_deserializer
def sortPartition(iterator: Iterable[Tuple[K, V]]) -> Iterable[Tuple[K, V]]:
sort = ExternalSorter(memory * 0.9, serializer).sorted
return iter(sort(iterator, key=lambda kv: keyfunc(kv[0]), reverse=(not ascending)))
if numPartitions == 1:
if self.getNumPartitions() > 1:
self = self.coalesce(1)
return self.mapPartitions(sortPartition, True)
# first compute the boundary of each part via sampling: we want to partition
# the key-space into bins such that the bins have roughly the same
# number of (key, value) pairs falling into them
rddSize = self.count()
if not rddSize:
return self # empty RDD
maxSampleSize = numPartitions * 20.0 # constant from Spark's RangePartitioner
fraction = min(maxSampleSize / max(rddSize, 1), 1.0)
samples = self.sample(False, fraction, 1).map(lambda kv: kv[0]).collect()
samples = sorted(samples, key=keyfunc)
# we have numPartitions many parts but one of the them has
# an implicit boundary
bounds = [
samples[int(len(samples) * (i + 1) / numPartitions)]
for i in range(0, numPartitions - 1)
]
def rangePartitioner(k: K) -> int:
p = bisect.bisect_left(bounds, keyfunc(k))
if ascending:
return p
else:
return numPartitions - 1 - p # type: ignore[operator]
return self.partitionBy(numPartitions, rangePartitioner).mapPartitions(sortPartition, True)
def sortBy(
self: "RDD[T]",
keyfunc: Callable[[T], "S"],
ascending: bool = True,
numPartitions: Optional[int] = None,
) -> "RDD[T]":
"""
Sorts this RDD by the given keyfunc.
.. versionadded:: 1.1.0
Parameters
----------
keyfunc : function
a function to compute the key
ascending : bool, optional, default True
sort the keys in ascending or descending order
numPartitions : int, optional
the number of partitions in new :class:`RDD`
Returns
-------
:class:`RDD`
a new :class:`RDD`
See Also
--------
:meth:`RDD.sortByKey`
:meth:`pyspark.sql.DataFrame.sort`
Examples
--------
>>> tmp = [('a', 1), ('b', 2), ('1', 3), ('d', 4), ('2', 5)]
>>> sc.parallelize(tmp).sortBy(lambda x: x[0]).collect()
[('1', 3), ('2', 5), ('a', 1), ('b', 2), ('d', 4)]
>>> sc.parallelize(tmp).sortBy(lambda x: x[1]).collect()
[('a', 1), ('b', 2), ('1', 3), ('d', 4), ('2', 5)]
"""
return (
self.keyBy(keyfunc) # type: ignore[type-var]
.sortByKey(ascending, numPartitions)
.values()
)
def glom(self: "RDD[T]") -> "RDD[List[T]]":
"""
Return an RDD created by coalescing all elements within each partition
into a list.
.. versionadded:: 0.7.0
Returns
-------
:class:`RDD`
a new :class:`RDD` coalescing all elements within each partition into a list
Examples
--------
>>> rdd = sc.parallelize([1, 2, 3, 4], 2)
>>> sorted(rdd.glom().collect())
[[1, 2], [3, 4]]
"""
def func(iterator: Iterable[T]) -> Iterable[List[T]]:
yield list(iterator)
return self.mapPartitions(func)
def cartesian(self: "RDD[T]", other: "RDD[U]") -> "RDD[Tuple[T, U]]":
"""
Return the Cartesian product of this RDD and another one, that is, the
RDD of all pairs of elements ``(a, b)`` where ``a`` is in `self` and
``b`` is in `other`.
.. versionadded:: 0.7.0
Parameters
----------
other : :class:`RDD`
another :class:`RDD`
Returns
-------
:class:`RDD`
the Cartesian product of this :class:`RDD` and another one
See Also
--------
:meth:`pyspark.sql.DataFrame.crossJoin`
Examples
--------
>>> rdd = sc.parallelize([1, 2])
>>> sorted(rdd.cartesian(rdd).collect())
[(1, 1), (1, 2), (2, 1), (2, 2)]
"""
# Due to batching, we can't use the Java cartesian method.
deserializer = CartesianDeserializer(self._jrdd_deserializer, other._jrdd_deserializer)
return RDD(self._jrdd.cartesian(other._jrdd), self.ctx, deserializer)
def groupBy(
self: "RDD[T]",
f: Callable[[T], K],
numPartitions: Optional[int] = None,
partitionFunc: Callable[[K], int] = portable_hash,
) -> "RDD[Tuple[K, Iterable[T]]]":
"""
Return an RDD of grouped items.
.. versionadded:: 0.7.0
Parameters
----------
f : function
a function to compute the key
numPartitions : int, optional
the number of partitions in new :class:`RDD`
partitionFunc : function, optional, default `portable_hash`
a function to compute the partition index
Returns
-------
:class:`RDD`
a new :class:`RDD` of grouped items
See Also
--------
:meth:`RDD.groupByKey`
:meth:`pyspark.sql.DataFrame.groupBy`
Examples
--------
>>> rdd = sc.parallelize([1, 1, 2, 3, 5, 8])
>>> result = rdd.groupBy(lambda x: x % 2).collect()
>>> sorted([(x, sorted(y)) for (x, y) in result])
[(0, [2, 8]), (1, [1, 1, 3, 5])]
"""
return self.map(lambda x: (f(x), x)).groupByKey(numPartitions, partitionFunc)
def pipe(
self, command: str, env: Optional[Dict[str, str]] = None, checkCode: bool = False
) -> "RDD[str]":
"""
Return an RDD created by piping elements to a forked external process.
.. versionadded:: 0.7.0
Parameters
----------
command : str
command to run.
env : dict, optional
environment variables to set.
checkCode : bool, optional
whether to check the return value of the shell command.
Returns
-------
:class:`RDD`
a new :class:`RDD` of strings
Examples
--------
>>> sc.parallelize(['1', '2', '', '3']).pipe('cat').collect()
['1', '2', '', '3']
"""
if env is None:
env = dict()
def func(iterator: Iterable[T]) -> Iterable[str]:
pipe = Popen(shlex.split(command), env=env, stdin=PIPE, stdout=PIPE)
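# Feed the elements to the child's stdin from a separate thread so the parent
# can read stdout concurrently without deadlocking on full pipe buffers.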
def pipe_objs(out: IO[bytes]) -> None:
for obj in iterator:
s = str(obj).rstrip("\n") + "\n"
out.write(s.encode("utf-8"))
out.close()
Thread(target=pipe_objs, args=[pipe.stdin]).start()
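# check_return_code yields nothing; chaining it after the stdout iterator means
# the exit status is only examined once all output has been consumed.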
def check_return_code() -> Iterable[int]:
pipe.wait()
if checkCode and pipe.returncode:
raise RuntimeError(
"Pipe function `%s' exited "
"with error code %d" % (command, pipe.returncode)
)
else:
for i in range(0):
yield i
return (
cast(bytes, x).rstrip(b"\n").decode("utf-8")
for x in chain(
iter(cast(IO[bytes], pipe.stdout).readline, b""), check_return_code()
)
)
return self.mapPartitions(func)
def foreach(self: "RDD[T]", f: Callable[[T], None]) -> None:
"""
Applies a function to all elements of this RDD.
.. versionadded:: 0.7.0
Parameters
----------
f : function
a function applied to each element
See Also
--------
:meth:`RDD.foreachPartition`
:meth:`pyspark.sql.DataFrame.foreach`
:meth:`pyspark.sql.DataFrame.foreachPartition`
Examples
--------
>>> def f(x): print(x)
>>> sc.parallelize([1, 2, 3, 4, 5]).foreach(f)
"""
f = fail_on_stopiteration(f)
def processPartition(iterator: Iterable[T]) -> Iterable[Any]:
for x in iterator:
f(x)
return iter([])
self.mapPartitions(processPartition).count() # Force evaluation
def foreachPartition(self: "RDD[T]", f: Callable[[Iterable[T]], None]) -> None:
"""
Applies a function to each partition of this RDD.
.. versionadded:: 1.0.0
Parameters
----------
f : function
a function applied to each partition
See Also
--------
:meth:`RDD.foreach`
:meth:`pyspark.sql.DataFrame.foreach`
:meth:`pyspark.sql.DataFrame.foreachPartition`
Examples
--------
>>> def f(iterator):
... for x in iterator:
... print(x)
>>> sc.parallelize([1, 2, 3, 4, 5]).foreachPartition(f)
"""
def func(it: Iterable[T]) -> Iterable[Any]:
r = f(it)
try:
return iter(r) # type: ignore[call-overload]
except TypeError:
return iter([])
self.mapPartitions(func).count() # Force evaluation
def collect(self: "RDD[T]") -> List[T]:
"""
Return a list that contains all the elements in this RDD.
.. versionadded:: 0.7.0
Returns
-------
list
a list containing all the elements
Notes
-----
This method should only be used if the resulting array is expected
to be small, as all the data is loaded into the driver's memory.
See Also
--------
:meth:`RDD.toLocalIterator`
:meth:`pyspark.sql.DataFrame.collect`
Examples
--------
>>> sc.range(5).collect()
[0, 1, 2, 3, 4]
>>> sc.parallelize(["x", "y", "z"]).collect()
['x', 'y', 'z']
"""
with SCCallSiteSync(self.context):
assert self.ctx._jvm is not None
sock_info = self.ctx._jvm.PythonRDD.collectAndServe(self._jrdd.rdd())
return list(_load_from_socket(sock_info, self._jrdd_deserializer))
def collectWithJobGroup(
self: "RDD[T]", groupId: str, description: str, interruptOnCancel: bool = False
) -> "List[T]":
"""
When collecting the RDD, use this method to specify a job group.
.. versionadded:: 3.0.0
.. deprecated:: 3.1.0
Use :class:`pyspark.InheritableThread` with the pinned thread mode enabled.
Parameters
----------
groupId : str
The group ID to assign.
description : str
The description to set for the job group.
interruptOnCancel : bool, optional, default False
whether to interrupt jobs on job cancellation.
Returns
-------
list
a list containing all the elements
See Also
--------
:meth:`RDD.collect`
:meth:`SparkContext.setJobGroup`
"""
warnings.warn(
"Deprecated in 3.1, Use pyspark.InheritableThread with "
"the pinned thread mode enabled.",
FutureWarning,
)
with SCCallSiteSync(self.context):
assert self.ctx._jvm is not None
sock_info = self.ctx._jvm.PythonRDD.collectAndServeWithJobGroup(
self._jrdd.rdd(), groupId, description, interruptOnCancel
)
return list(_load_from_socket(sock_info, self._jrdd_deserializer))
def reduce(self: "RDD[T]", f: Callable[[T, T], T]) -> T:
"""
Reduces the elements of this RDD using the specified commutative and
associative binary operator. Currently reduces partitions locally.
.. versionadded:: 0.7.0
Parameters
----------
f : function
the reduce function
Returns
-------
T
the aggregated result
See Also
--------
:meth:`RDD.treeReduce`
:meth:`RDD.aggregate`
:meth:`RDD.treeAggregate`
Examples
--------
>>> from operator import add
>>> sc.parallelize([1, 2, 3, 4, 5]).reduce(add)
15
>>> sc.parallelize((2 for _ in range(10))).map(lambda x: 1).cache().reduce(add)
10
>>> sc.parallelize([]).reduce(add)
Traceback (most recent call last):
...
ValueError: Can not reduce() empty RDD
"""
f = fail_on_stopiteration(f)
def func(iterator: Iterable[T]) -> Iterable[T]:
iterator = iter(iterator)
try:
initial = next(iterator)
except StopIteration:
return
yield reduce(f, iterator, initial)
vals = self.mapPartitions(func).collect()
if vals:
return reduce(f, vals)
raise ValueError("Can not reduce() empty RDD")
def treeReduce(self: "RDD[T]", f: Callable[[T, T], T], depth: int = 2) -> T:
"""
Reduces the elements of this RDD in a multi-level tree pattern.
.. versionadded:: 1.3.0
Parameters
----------
f : function
the reduce function
depth : int, optional, default 2
suggested depth of the tree (default: 2)
Returns
-------
T
the aggregated result
See Also
--------
:meth:`RDD.reduce`
:meth:`RDD.aggregate`
:meth:`RDD.treeAggregate`
Examples
--------
>>> add = lambda x, y: x + y
>>> rdd = sc.parallelize([-5, -4, -3, -2, -1, 1, 2, 3, 4], 10)
>>> rdd.treeReduce(add)
-5
>>> rdd.treeReduce(add, 1)
-5
>>> rdd.treeReduce(add, 2)
-5
>>> rdd.treeReduce(add, 5)
-5
>>> rdd.treeReduce(add, 10)
-5
"""
if depth < 1:
raise ValueError("Depth cannot be smaller than 1 but got %d." % depth)
# Use the second entry to indicate whether this is a dummy value.
zeroValue: Tuple[T, bool] = ( # type: ignore[assignment]
None,
True,
)
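# Combine two tagged values, treating the dummy zero value as an identity
# element; only (value, False) pairs carry real data.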
def op(x: Tuple[T, bool], y: Tuple[T, bool]) -> Tuple[T, bool]:
if x[1]:
return y
elif y[1]:
return x
else:
return f(x[0], y[0]), False
reduced = self.map(lambda x: (x, False)).treeAggregate(zeroValue, op, op, depth)
if reduced[1]:
raise ValueError("Cannot reduce empty RDD.")
return reduced[0]
def fold(self: "RDD[T]", zeroValue: T, op: Callable[[T, T], T]) -> T:
"""
Aggregate the elements of each partition, and then the results for all
the partitions, using a given associative function and a neutral "zero value."
The function ``op(t1, t2)`` is allowed to modify ``t1`` and return it
as its result value to avoid object allocation; however, it should not
modify ``t2``.
This behaves somewhat differently from fold operations implemented
for non-distributed collections in functional languages like Scala.
This fold operation may be applied to partitions individually, and then
fold those results into the final result, rather than apply the fold
to each element sequentially in some defined ordering. For functions
that are not commutative, the result may differ from that of a fold
applied to a non-distributed collection.
.. versionadded:: 0.7.0
Parameters
----------
zeroValue : T
the initial value for the accumulated result of each partition
op : function
a function used to both accumulate results within a partition and combine
results from different partitions
Returns
-------
T
the aggregated result
See Also
--------
:meth:`RDD.reduce`
:meth:`RDD.aggregate`
Examples
--------
>>> from operator import add
>>> sc.parallelize([1, 2, 3, 4, 5]).fold(0, add)
15
"""
op = fail_on_stopiteration(op)
def func(iterator: Iterable[T]) -> Iterable[T]:
acc = zeroValue
for obj in iterator:
acc = op(acc, obj)
yield acc
# collecting result of mapPartitions here ensures that the copy of
# zeroValue provided to each partition is unique from the one provided
# to the final reduce call
vals = self.mapPartitions(func).collect()
return reduce(op, vals, zeroValue)
def aggregate(
self: "RDD[T]", zeroValue: U, seqOp: Callable[[U, T], U], combOp: Callable[[U, U], U]
) -> U:
"""
Aggregate the elements of each partition, and then the results for all
the partitions, using a given combine functions and a neutral "zero
value."
Both ``seqOp(t1, t2)`` and ``combOp(t1, t2)`` are allowed to modify ``t1``
and return it as their result value to avoid object allocation; however,
they should not modify ``t2``.
The first function (seqOp) can return a different result type, U, than
the type of this RDD. Thus, we need one operation for merging a T into
a U and one operation for merging two U's.
.. versionadded:: 1.1.0
Parameters
----------
zeroValue : U
the initial value for the accumulated result of each partition
seqOp : function
a function used to accumulate results within a partition
combOp : function
an associative function used to combine results from different partitions
Returns
-------
U
the aggregated result
See Also
--------
:meth:`RDD.reduce`
:meth:`RDD.fold`
Examples
--------
>>> seqOp = (lambda x, y: (x[0] + y, x[1] + 1))
>>> combOp = (lambda x, y: (x[0] + y[0], x[1] + y[1]))
>>> sc.parallelize([1, 2, 3, 4]).aggregate((0, 0), seqOp, combOp)
(10, 4)
>>> sc.parallelize([]).aggregate((0, 0), seqOp, combOp)
(0, 0)
"""
seqOp = fail_on_stopiteration(seqOp)
combOp = fail_on_stopiteration(combOp)
def func(iterator: Iterable[T]) -> Iterable[U]:
acc = zeroValue
for obj in iterator:
acc = seqOp(acc, obj)
yield acc
# collecting result of mapPartitions here ensures that the copy of
# zeroValue provided to each partition is unique from the one provided
# to the final reduce call
vals = self.mapPartitions(func).collect()
return reduce(combOp, vals, zeroValue)
def treeAggregate(
self: "RDD[T]",
zeroValue: U,
seqOp: Callable[[U, T], U],
combOp: Callable[[U, U], U],
depth: int = 2,
) -> U:
"""
Aggregates the elements of this RDD in a multi-level tree
pattern.
.. versionadded:: 1.3.0
Parameters
----------
zeroValue : U
the initial value for the accumulated result of each partition
seqOp : function
a function used to accumulate results within a partition
combOp : function
an associative function used to combine results from different partitions
depth : int, optional, default 2
suggested depth of the tree
Returns
-------
U
the aggregated result
See Also
--------
:meth:`RDD.aggregate`
:meth:`RDD.treeReduce`
Examples
--------
>>> add = lambda x, y: x + y
>>> rdd = sc.parallelize([-5, -4, -3, -2, -1, 1, 2, 3, 4], 10)
>>> rdd.treeAggregate(0, add, add)
-5
>>> rdd.treeAggregate(0, add, add, 1)
-5
>>> rdd.treeAggregate(0, add, add, 2)
-5
>>> rdd.treeAggregate(0, add, add, 5)
-5
>>> rdd.treeAggregate(0, add, add, 10)
-5
"""
if depth < 1:
raise ValueError("Depth cannot be smaller than 1 but got %d." % depth)
if self.getNumPartitions() == 0:
return zeroValue
def aggregatePartition(iterator: Iterable[T]) -> Iterable[U]:
acc = zeroValue
for obj in iterator:
acc = seqOp(acc, obj)
yield acc
partiallyAggregated = self.mapPartitions(aggregatePartition)
numPartitions = partiallyAggregated.getNumPartitions()
scale = max(int(ceil(pow(numPartitions, 1.0 / depth))), 2)
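# scale ~= numPartitions ** (1 / depth): each extra tree level shrinks the
# number of partitions by roughly this factor.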
# If creating an extra level doesn't help reduce the wall-clock time, we stop the tree
# aggregation.
while numPartitions > scale + numPartitions / scale:
numPartitions /= scale # type: ignore[assignment]
curNumPartitions = int(numPartitions)
def mapPartition(i: int, iterator: Iterable[U]) -> Iterable[Tuple[int, U]]:
for obj in iterator:
yield (i % curNumPartitions, obj)
partiallyAggregated = (
partiallyAggregated.mapPartitionsWithIndex(mapPartition)
.reduceByKey(combOp, curNumPartitions)
.values()
)
return partiallyAggregated.reduce(combOp)
@overload
def max(self: "RDD[S]") -> "S":
...
@overload
def max(self: "RDD[T]", key: Callable[[T], "S"]) -> T:
...
def max(self: "RDD[T]", key: Optional[Callable[[T], "S"]] = None) -> T:
"""
Find the maximum item in this RDD.
.. versionadded:: 1.0.0
Parameters
----------
key : function, optional
A function used to generate key for comparing
Returns
-------
T
the maximum item
See Also
--------
:meth:`RDD.min`
Examples
--------
>>> rdd = sc.parallelize([1.0, 5.0, 43.0, 10.0])
>>> rdd.max()
43.0
>>> rdd.max(key=str)
5.0
"""
if key is None:
return self.reduce(max) # type: ignore[arg-type]
return self.reduce(lambda a, b: max(a, b, key=key)) # type: ignore[arg-type]
@overload
def min(self: "RDD[S]") -> "S":
...
@overload
def min(self: "RDD[T]", key: Callable[[T], "S"]) -> T:
...
def min(self: "RDD[T]", key: Optional[Callable[[T], "S"]] = None) -> T:
"""
Find the minimum item in this RDD.
.. versionadded:: 1.0.0
Parameters
----------
key : function, optional
A function used to generate key for comparing
Returns
-------
T
the minimum item
See Also
--------
:meth:`RDD.max`
Examples
--------
>>> rdd = sc.parallelize([2.0, 5.0, 43.0, 10.0])
>>> rdd.min()
2.0
>>> rdd.min(key=str)
10.0
"""
if key is None:
return self.reduce(min) # type: ignore[arg-type]
return self.reduce(lambda a, b: min(a, b, key=key)) # type: ignore[arg-type]
def sum(self: "RDD[NumberOrArray]") -> "NumberOrArray":
"""
Add up the elements in this RDD.
.. versionadded:: 0.7.0
Returns
-------
float, int, or complex
the sum of all elements
See Also
--------
:meth:`RDD.mean`
:meth:`RDD.sumApprox`
Examples
--------
>>> sc.parallelize([1.0, 2.0, 3.0]).sum()
6.0
"""
return self.mapPartitions(lambda x: [sum(x)]).fold( # type: ignore[return-value]
0, operator.add
)
def count(self) -> int:
"""
Return the number of elements in this RDD.
.. versionadded:: 0.7.0
Returns
-------
int
the number of elements
See Also
--------
:meth:`RDD.countApprox`
:meth:`pyspark.sql.DataFrame.count`
Examples
--------
>>> sc.parallelize([2, 3, 4]).count()
3
"""
return self.mapPartitions(lambda i: [sum(1 for _ in i)]).sum()
def stats(self: "RDD[NumberOrArray]") -> StatCounter:
"""
Return a :class:`StatCounter` object that captures the mean, variance
and count of the RDD's elements in one operation.
.. versionadded:: 0.9.1
Returns
-------
:class:`StatCounter`
a :class:`StatCounter` capturing the mean, variance and count of all elements
See Also
--------
:meth:`RDD.stdev`
:meth:`RDD.sampleStdev`
:meth:`RDD.variance`
:meth:`RDD.sampleVariance`
:meth:`RDD.histogram`
:meth:`pyspark.sql.DataFrame.stat`
"""
def redFunc(left_counter: StatCounter, right_counter: StatCounter) -> StatCounter:
return left_counter.mergeStats(right_counter)
return self.mapPartitions(lambda i: [StatCounter(i)]).reduce( # type: ignore[arg-type]
redFunc
)
def histogram(
self: "RDD[S]", buckets: Union[int, List["S"], Tuple["S", ...]]
) -> Tuple[Sequence["S"], List[int]]:
"""
Compute a histogram using the provided buckets. The buckets
are all open to the right except for the last which is closed.
e.g. [1,10,20,50] means the buckets are [1,10) [10,20) [20,50],
which means 1<=x<10, 10<=x<20, 20<=x<=50. For example, given the inputs 1
and 50, the resulting histogram would be [1, 0, 1].
If your histogram is evenly spaced (e.g. [0, 10, 20, 30]),
this can be switched from an O(log n) insertion to O(1) per
element (where n is the number of buckets).
Buckets must be sorted, not contain any duplicates, and have
at least two elements.
If `buckets` is a number, it will generate buckets which are
evenly spaced between the minimum and maximum of the RDD. For
example, if the min value is 0 and the max is 100, given `buckets`
as 2, the resulting buckets will be [0,50) [50,100]. `buckets` must
be at least 1. An exception is raised if the RDD contains infinity.
If the elements in the RDD do not vary (max == min), a single bucket
will be used.
.. versionadded:: 1.2.0
Parameters
----------
buckets : int, or list, or tuple
if `buckets` is a number, it computes a histogram of the data using
`buckets` number of buckets evenly, otherwise, `buckets` is the provided
buckets to bin the data.
Returns
-------
tuple
a tuple of buckets and histogram
See Also
--------
:meth:`RDD.stats`
Examples
--------
>>> rdd = sc.parallelize(range(51))
>>> rdd.histogram(2)
([0, 25, 50], [25, 26])
>>> rdd.histogram([0, 5, 25, 50])
([0, 5, 25, 50], [5, 20, 26])
>>> rdd.histogram([0, 15, 30, 45, 60]) # evenly spaced buckets
([0, 15, 30, 45, 60], [15, 15, 15, 6])
>>> rdd = sc.parallelize(["ab", "ac", "b", "bd", "ef"])
>>> rdd.histogram(("a", "b", "c"))
(('a', 'b', 'c'), [2, 2])
"""
if isinstance(buckets, int):
if buckets < 1:
raise ValueError("number of buckets must be >= 1")
# filter out non-comparable elements
def comparable(x: Any) -> bool:
if x is None:
return False
if type(x) is float and isnan(x):
return False
return True
filtered = self.filter(comparable)
# faster than stats()
def minmax(a: Tuple["S", "S"], b: Tuple["S", "S"]) -> Tuple["S", "S"]:
return min(a[0], b[0]), max(a[1], b[1])
try:
minv, maxv = filtered.map(lambda x: (x, x)).reduce(minmax)
except TypeError as e:
if " empty " in str(e):
raise ValueError("can not generate buckets from empty RDD")
raise
if minv == maxv or buckets == 1:
return [minv, maxv], [filtered.count()]
try:
inc = (maxv - minv) / buckets # type: ignore[operator]
except TypeError:
raise TypeError("Can not generate buckets with non-number in RDD")
if isinf(inc):
raise ValueError("Can not generate buckets with infinite value")
# keep them as integer if possible
inc = int(inc)
if inc * buckets != maxv - minv: # type: ignore[operator]
inc = (maxv - minv) * 1.0 / buckets # type: ignore[operator]
buckets = [i * inc + minv for i in range(buckets)]
buckets.append(maxv) # fix accumulated error
even = True
elif isinstance(buckets, (list, tuple)):
if len(buckets) < 2:
raise ValueError("buckets should have more than one value")
if any(i is None or isinstance(i, float) and isnan(i) for i in buckets):
raise ValueError("can not have None or NaN in buckets")
if sorted(buckets) != list(buckets):
raise ValueError("buckets should be sorted")
if len(set(buckets)) != len(buckets):
raise ValueError("buckets should not contain duplicated values")
minv = buckets[0]
maxv = buckets[-1]
even = False
inc = None
try:
steps = [
buckets[i + 1] - buckets[i] # type: ignore[operator]
for i in range(len(buckets) - 1)
]
except TypeError:
pass # objects in buckets do not support '-'
else:
if max(steps) - min(steps) < 1e-10: # handle precision errors
even = True
inc = (maxv - minv) / (len(buckets) - 1) # type: ignore[operator]
else:
raise TypeError("buckets should be a list or tuple or number(int or long)")
def histogram(iterator: Iterable["S"]) -> Iterable[List[int]]:
counters = [0] * len(buckets) # type: ignore[arg-type]
for i in iterator:
if i is None or (isinstance(i, float) and isnan(i)) or i > maxv or i < minv:
continue
t = (
int((i - minv) / inc) # type: ignore[operator]
if even
else bisect.bisect_right(buckets, i) - 1 # type: ignore[arg-type]
)
counters[t] += 1
# add last two together
last = counters.pop()
counters[-1] += last
return [counters]
def mergeCounters(a: List[int], b: List[int]) -> List[int]:
return [i + j for i, j in zip(a, b)]
return buckets, self.mapPartitions(histogram).reduce(mergeCounters)
def mean(self: "RDD[NumberOrArray]") -> float:
"""
Compute the mean of this RDD's elements.
.. versionadded:: 0.9.1
Returns
-------
float
the mean of all elements
See Also
--------
:meth:`RDD.stats`
:meth:`RDD.sum`
:meth:`RDD.meanApprox`
Examples
--------
>>> sc.parallelize([1, 2, 3]).mean()
2.0
"""
return self.stats().mean()
def variance(self: "RDD[NumberOrArray]") -> float:
"""
Compute the variance of this RDD's elements.
.. versionadded:: 0.9.1
Returns
-------
float
the variance of all elements
See Also
--------
:meth:`RDD.stats`
:meth:`RDD.sampleVariance`
:meth:`RDD.stdev`
:meth:`RDD.sampleStdev`
Examples
--------
>>> sc.parallelize([1, 2, 3]).variance()
0.666...
"""
return self.stats().variance()
def stdev(self: "RDD[NumberOrArray]") -> float:
"""
Compute the standard deviation of this RDD's elements.
.. versionadded:: 0.9.1
Returns
-------
float
the standard deviation of all elements
See Also
--------
:meth:`RDD.stats`
:meth:`RDD.sampleStdev`
:meth:`RDD.variance`
:meth:`RDD.sampleVariance`
Examples
--------
>>> sc.parallelize([1, 2, 3]).stdev()
0.816...
"""
return self.stats().stdev()
def sampleStdev(self: "RDD[NumberOrArray]") -> float:
"""
Compute the sample standard deviation of this RDD's elements (which
corrects for bias in estimating the standard deviation by dividing by
N-1 instead of N).
.. versionadded:: 0.9.1
Returns
-------
float
the sample standard deviation of all elements
See Also
--------
:meth:`RDD.stats`
:meth:`RDD.stdev`
:meth:`RDD.variance`
:meth:`RDD.sampleVariance`
Examples
--------
>>> sc.parallelize([1, 2, 3]).sampleStdev()
1.0
"""
return self.stats().sampleStdev()
def sampleVariance(self: "RDD[NumberOrArray]") -> float:
"""
Compute the sample variance of this RDD's elements (which corrects
for bias in estimating the variance by dividing by N-1 instead of N).
.. versionadded:: 0.9.1
Returns
-------
float
the sample variance of all elements
See Also
--------
:meth:`RDD.stats`
:meth:`RDD.variance`
:meth:`RDD.stdev`
:meth:`RDD.sampleStdev`
Examples
--------
>>> sc.parallelize([1, 2, 3]).sampleVariance()
1.0
"""
return self.stats().sampleVariance()
def countByValue(self: "RDD[K]") -> Dict[K, int]:
"""
Return the count of each unique value in this RDD as a dictionary of
(value, count) pairs.
.. versionadded:: 0.7.0
Returns
-------
dict
a dictionary of (value, count) pairs
See Also
--------
:meth:`RDD.collectAsMap`
:meth:`RDD.countByKey`
Examples
--------
>>> sorted(sc.parallelize([1, 2, 1, 2, 2], 2).countByValue().items())
[(1, 2), (2, 3)]
"""
def countPartition(iterator: Iterable[K]) -> Iterable[Dict[K, int]]:
counts: Dict[K, int] = defaultdict(int)
for obj in iterator:
counts[obj] += 1
yield counts
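# Merge two per-partition count dicts by accumulating into m1 in place.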
def mergeMaps(m1: Dict[K, int], m2: Dict[K, int]) -> Dict[K, int]:
for k, v in m2.items():
m1[k] += v
return m1
return self.mapPartitions(countPartition).reduce(mergeMaps)
@overload
def top(self: "RDD[S]", num: int) -> List["S"]:
...
@overload
def top(self: "RDD[T]", num: int, key: Callable[[T], "S"]) -> List[T]:
...
def top(self: "RDD[T]", num: int, key: Optional[Callable[[T], "S"]] = None) -> List[T]:
"""
Get the top N elements from an RDD.
.. versionadded:: 1.0.0
Parameters
----------
num : int
top N
key : function, optional
a function used to generate key for comparing
Returns
-------
list
the top N elements
See Also
--------
:meth:`RDD.takeOrdered`
:meth:`RDD.max`
:meth:`RDD.min`
Notes
-----
This method should only be used if the resulting array is expected
to be small, as all the data is loaded into the driver's memory.
It returns the list sorted in descending order.
Examples
--------
>>> sc.parallelize([10, 4, 2, 12, 3]).top(1)
[12]
>>> sc.parallelize([2, 3, 4, 5, 6], 2).top(2)
[6, 5]
>>> sc.parallelize([10, 4, 2, 12, 3]).top(3, key=str)
[4, 3, 2]
"""
def topIterator(iterator: Iterable[T]) -> Iterable[List[T]]:
yield heapq.nlargest(num, iterator, key=key)
def merge(a: List[T], b: List[T]) -> List[T]:
return heapq.nlargest(num, a + b, key=key)
return self.mapPartitions(topIterator).reduce(merge)
@overload
def takeOrdered(self: "RDD[S]", num: int) -> List["S"]:
...
@overload
def takeOrdered(self: "RDD[T]", num: int, key: Callable[[T], "S"]) -> List[T]:
...
def takeOrdered(self: "RDD[T]", num: int, key: Optional[Callable[[T], "S"]] = None) -> List[T]:
"""
Get the N elements from an RDD ordered in ascending order or as
specified by the optional key function.
.. versionadded:: 1.0.0
Parameters
----------
num : int
top N
key : function, optional
a function used to generate key for comparing
Returns
-------
list
the top N elements
See Also
--------
:meth:`RDD.top`
:meth:`RDD.max`
:meth:`RDD.min`
Notes
-----
This method should only be used if the resulting array is expected
to be small, as all the data is loaded into the driver's memory.
Examples
--------
>>> sc.parallelize([10, 1, 2, 9, 3, 4, 5, 6, 7]).takeOrdered(6)
[1, 2, 3, 4, 5, 6]
>>> sc.parallelize([10, 1, 2, 9, 3, 4, 5, 6, 7], 2).takeOrdered(6, key=lambda x: -x)
[10, 9, 7, 6, 5, 4]
>>> sc.emptyRDD().takeOrdered(3)
[]
"""
if num < 0:
raise ValueError("top N cannot be negative.")
if num == 0 or self.getNumPartitions() == 0:
return []
else:
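# heapq.nsmallest keeps at most num elements per partition, so only small
# lists are shipped to the driver for the final merge.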
def merge(a: List[T], b: List[T]) -> List[T]:
return heapq.nsmallest(num, a + b, key)
return self.mapPartitions(lambda it: [heapq.nsmallest(num, it, key)]).reduce(merge)
def take(self: "RDD[T]", num: int) -> List[T]:
"""
Take the first num elements of the RDD.
It works by first scanning one partition, and using the results from
that partition to estimate the number of additional partitions needed
to satisfy the limit.
Translated from the Scala implementation in RDD#take().
.. versionadded:: 0.7.0
Parameters
----------
num : int
first number of elements
Returns
-------
list
the first `num` elements
See Also
--------
:meth:`RDD.first`
:meth:`pyspark.sql.DataFrame.take`
Notes
-----
This method should only be used if the resulting array is expected
to be small, as all the data is loaded into the driver's memory.
Examples
--------
>>> sc.parallelize([2, 3, 4, 5, 6]).cache().take(2)
[2, 3]
>>> sc.parallelize([2, 3, 4, 5, 6]).take(10)
[2, 3, 4, 5, 6]
>>> sc.parallelize(range(100), 100).filter(lambda x: x > 90).take(3)
[91, 92, 93]
"""
items: List[T] = []
totalParts = self.getNumPartitions()
partsScanned = 0
while len(items) < num and partsScanned < totalParts:
# The number of partitions to try in this iteration.
# It is ok for this number to be greater than totalParts because
# we actually cap it at totalParts in runJob.
numPartsToTry = 1
if partsScanned > 0:
# If we didn't find any rows after the previous iteration,
# quadruple and retry. Otherwise, interpolate the number of
# partitions we need to try, but overestimate it by 50%.
# We also cap the estimation in the end.
if len(items) == 0:
numPartsToTry = partsScanned * 4
else:
# the first parameter of max is >=1 whenever partsScanned >= 2
numPartsToTry = int(1.5 * num * partsScanned / len(items)) - partsScanned
numPartsToTry = min(max(numPartsToTry, 1), partsScanned * 4)
left = num - len(items)
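# Each task yields at most `left` elements, so a scanned partition never
# returns more than is still needed.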
def takeUpToNumLeft(iterator: Iterable[T]) -> Iterable[T]:
iterator = iter(iterator)
taken = 0
while taken < left:
try:
yield next(iterator)
except StopIteration:
return
taken += 1
p = range(partsScanned, min(partsScanned + numPartsToTry, totalParts))
res = self.context.runJob(self, takeUpToNumLeft, p)
items += res
partsScanned += numPartsToTry
return items[:num]
def first(self: "RDD[T]") -> T:
"""
Return the first element in this RDD.
.. versionadded:: 0.7.0
Returns
-------
T
the first element
See Also
--------
:meth:`RDD.take`
:meth:`pyspark.sql.DataFrame.first`
:meth:`pyspark.sql.DataFrame.head`
Examples
--------
>>> sc.parallelize([2, 3, 4]).first()
2
>>> sc.parallelize([]).first()
Traceback (most recent call last):
...
ValueError: RDD is empty
"""
rs = self.take(1)
if rs:
return rs[0]
raise ValueError("RDD is empty")
def isEmpty(self) -> bool:
"""
Returns true if and only if the RDD contains no elements at all.
.. versionadded:: 1.3.0
Returns
-------
bool
whether the :class:`RDD` is empty
See Also
--------
:meth:`RDD.first`
:meth:`pyspark.sql.DataFrame.isEmpty`
Notes
-----
An RDD may be empty even when it has at least 1 partition.
Examples
--------
>>> sc.parallelize([]).isEmpty()
True
>>> sc.parallelize([1]).isEmpty()
False
"""
return self.getNumPartitions() == 0 or len(self.take(1)) == 0
def saveAsNewAPIHadoopDataset(
self: "RDD[Tuple[K, V]]",
conf: Dict[str, str],
keyConverter: Optional[str] = None,
valueConverter: Optional[str] = None,
) -> None:
"""
Output a Python RDD of key-value pairs (of form ``RDD[(K, V)]``) to any Hadoop file
system, using the new Hadoop OutputFormat API (mapreduce package). Keys/values are
converted for output using either user specified converters or, by default,
"org.apache.spark.api.python.JavaToWritableConverter".
.. versionadded:: 1.1.0
Parameters
----------
conf : dict
Hadoop job configuration
keyConverter : str, optional
fully qualified classname of key converter (None by default)
valueConverter : str, optional
fully qualified classname of value converter (None by default)
See Also
--------
:meth:`SparkContext.newAPIHadoopRDD`
:meth:`RDD.saveAsHadoopDataset`
:meth:`RDD.saveAsHadoopFile`
:meth:`RDD.saveAsNewAPIHadoopFile`
:meth:`RDD.saveAsSequenceFile`
Examples
--------
>>> import os
>>> import tempfile
Set the related classes
>>> output_format_class = "org.apache.hadoop.mapreduce.lib.output.SequenceFileOutputFormat"
>>> input_format_class = "org.apache.hadoop.mapreduce.lib.input.SequenceFileInputFormat"
>>> key_class = "org.apache.hadoop.io.IntWritable"
>>> value_class = "org.apache.hadoop.io.Text"
>>> with tempfile.TemporaryDirectory() as d:
... path = os.path.join(d, "new_hadoop_file")
...
... # Create the conf for writing
... write_conf = {
... "mapreduce.job.outputformat.class": (output_format_class),
... "mapreduce.job.output.key.class": key_class,
... "mapreduce.job.output.value.class": value_class,
... "mapreduce.output.fileoutputformat.outputdir": path,
... }
...
... # Write a temporary Hadoop file
... rdd = sc.parallelize([(1, ""), (1, "a"), (3, "x")])
... rdd.saveAsNewAPIHadoopDataset(conf=write_conf)
...
... # Create the conf for reading
... read_conf = {"mapreduce.input.fileinputformat.inputdir": path}
...
... # Load this Hadoop file as an RDD
... loaded = sc.newAPIHadoopRDD(input_format_class,
... key_class, value_class, conf=read_conf)
... sorted(loaded.collect())
[(1, ''), (1, 'a'), (3, 'x')]
"""
jconf = self.ctx._dictToJavaMap(conf)
pickledRDD = self._pickled()
assert self.ctx._jvm is not None
self.ctx._jvm.PythonRDD.saveAsHadoopDataset(
pickledRDD._jrdd, True, jconf, keyConverter, valueConverter, True
)
def saveAsNewAPIHadoopFile(
self: "RDD[Tuple[K, V]]",
path: str,
outputFormatClass: str,
keyClass: Optional[str] = None,
valueClass: Optional[str] = None,
keyConverter: Optional[str] = None,
valueConverter: Optional[str] = None,
conf: Optional[Dict[str, str]] = None,
) -> None:
"""
Output a Python RDD of key-value pairs (of form ``RDD[(K, V)]``) to any Hadoop file
system, using the new Hadoop OutputFormat API (mapreduce package). Key and value types
will be inferred if not specified. Keys and values are converted for output using either
user specified converters or "org.apache.spark.api.python.JavaToWritableConverter". The
`conf` is applied on top of the base Hadoop conf associated with the SparkContext
of this RDD to create a merged Hadoop MapReduce job configuration for saving the data.
.. versionadded:: 1.1.0
Parameters
----------
path : str
path to Hadoop file
outputFormatClass : str
fully qualified classname of Hadoop OutputFormat
(e.g. "org.apache.hadoop.mapreduce.lib.output.SequenceFileOutputFormat")
keyClass : str, optional
fully qualified classname of key Writable class
(e.g. "org.apache.hadoop.io.IntWritable", None by default)
valueClass : str, optional
fully qualified classname of value Writable class
(e.g. "org.apache.hadoop.io.Text", None by default)
keyConverter : str, optional
fully qualified classname of key converter (None by default)
valueConverter : str, optional
fully qualified classname of value converter (None by default)
conf : dict, optional
Hadoop job configuration (None by default)
See Also
--------
:meth:`SparkContext.newAPIHadoopFile`
:meth:`RDD.saveAsHadoopDataset`
:meth:`RDD.saveAsNewAPIHadoopDataset`
:meth:`RDD.saveAsHadoopFile`
:meth:`RDD.saveAsSequenceFile`
Examples
--------
>>> import os
>>> import tempfile
Set the class of output format
>>> output_format_class = "org.apache.hadoop.mapreduce.lib.output.SequenceFileOutputFormat"
>>> with tempfile.TemporaryDirectory() as d:
... path = os.path.join(d, "hadoop_file")
...
... # Write a temporary Hadoop file
... rdd = sc.parallelize([(1, {3.0: "bb"}), (2, {1.0: "aa"}), (3, {2.0: "dd"})])
... rdd.saveAsNewAPIHadoopFile(path, output_format_class)
...
... # Load this Hadoop file as an RDD
... sorted(sc.sequenceFile(path).collect())
[(1, {3.0: 'bb'}), (2, {1.0: 'aa'}), (3, {2.0: 'dd'})]
"""
jconf = self.ctx._dictToJavaMap(conf)
pickledRDD = self._pickled()
assert self.ctx._jvm is not None
self.ctx._jvm.PythonRDD.saveAsNewAPIHadoopFile(
pickledRDD._jrdd,
True,
path,
outputFormatClass,
keyClass,
valueClass,
keyConverter,
valueConverter,
jconf,
)
def saveAsHadoopDataset(
self: "RDD[Tuple[K, V]]",
conf: Dict[str, str],
keyConverter: Optional[str] = None,
valueConverter: Optional[str] = None,
) -> None:
"""
Output a Python RDD of key-value pairs (of form ``RDD[(K, V)]``) to any Hadoop file
system, using the old Hadoop OutputFormat API (mapred package). Keys/values are
converted for output using either user specified converters or, by default,
"org.apache.spark.api.python.JavaToWritableConverter".
.. versionadded:: 1.1.0
Parameters
----------
conf : dict
Hadoop job configuration
keyConverter : str, optional
fully qualified classname of key converter (None by default)
valueConverter : str, optional
fully qualified classname of value converter (None by default)
See Also
--------
:meth:`SparkContext.hadoopRDD`
:meth:`RDD.saveAsNewAPIHadoopDataset`
:meth:`RDD.saveAsHadoopFile`
:meth:`RDD.saveAsNewAPIHadoopFile`
:meth:`RDD.saveAsSequenceFile`
Examples
--------
>>> import os
>>> import tempfile
Set the related classes
>>> output_format_class = "org.apache.hadoop.mapred.TextOutputFormat"
>>> input_format_class = "org.apache.hadoop.mapred.TextInputFormat"
>>> key_class = "org.apache.hadoop.io.IntWritable"
>>> value_class = "org.apache.hadoop.io.Text"
>>> with tempfile.TemporaryDirectory() as d:
... path = os.path.join(d, "old_hadoop_file")
...
... # Create the conf for writing
... write_conf = {
... "mapred.output.format.class": output_format_class,
... "mapreduce.job.output.key.class": key_class,
... "mapreduce.job.output.value.class": value_class,
... "mapreduce.output.fileoutputformat.outputdir": path,
... }
...
... # Write a temporary Hadoop file
... rdd = sc.parallelize([(1, ""), (1, "a"), (3, "x")])
... rdd.saveAsHadoopDataset(conf=write_conf)
...
... # Create the conf for reading
... read_conf = {"mapreduce.input.fileinputformat.inputdir": path}
...
... # Load this Hadoop file as an RDD
... loaded = sc.hadoopRDD(input_format_class, key_class, value_class, conf=read_conf)
... sorted(loaded.collect())
[(0, '1\\t'), (0, '1\\ta'), (0, '3\\tx')]
"""
jconf = self.ctx._dictToJavaMap(conf)
pickledRDD = self._pickled()
assert self.ctx._jvm is not None
self.ctx._jvm.PythonRDD.saveAsHadoopDataset(
pickledRDD._jrdd, True, jconf, keyConverter, valueConverter, False
)
def saveAsHadoopFile(
self: "RDD[Tuple[K, V]]",
path: str,
outputFormatClass: str,
keyClass: Optional[str] = None,
valueClass: Optional[str] = None,
keyConverter: Optional[str] = None,
valueConverter: Optional[str] = None,
conf: Optional[Dict[str, str]] = None,
compressionCodecClass: Optional[str] = None,
) -> None:
"""
Output a Python RDD of key-value pairs (of form ``RDD[(K, V)]``) to any Hadoop file
system, using the old Hadoop OutputFormat API (mapred package). Key and value types
will be inferred if not specified. Keys and values are converted for output using either
user specified converters or "org.apache.spark.api.python.JavaToWritableConverter". The
`conf` is applied on top of the base Hadoop conf associated with the SparkContext
of this RDD to create a merged Hadoop MapReduce job configuration for saving the data.
.. versionadded:: 1.1.0
Parameters
----------
path : str
path to Hadoop file
outputFormatClass : str
fully qualified classname of Hadoop OutputFormat
(e.g. "org.apache.hadoop.mapred.SequenceFileOutputFormat")
keyClass : str, optional
fully qualified classname of key Writable class
(e.g. "org.apache.hadoop.io.IntWritable", None by default)
valueClass : str, optional
fully qualified classname of value Writable class
(e.g. "org.apache.hadoop.io.Text", None by default)
keyConverter : str, optional
fully qualified classname of key converter (None by default)
valueConverter : str, optional
fully qualified classname of value converter (None by default)
conf : dict, optional
Hadoop job configuration (None by default)
compressionCodecClass : str, optional
fully qualified classname of the compression codec class
e.g. "org.apache.hadoop.io.compress.GzipCodec" (None by default)
See Also
--------
:meth:`SparkContext.hadoopFile`
:meth:`RDD.saveAsNewAPIHadoopFile`
:meth:`RDD.saveAsHadoopDataset`
:meth:`RDD.saveAsNewAPIHadoopDataset`
:meth:`RDD.saveAsSequenceFile`
Examples
--------
>>> import os
>>> import tempfile
Set the related classes
>>> output_format_class = "org.apache.hadoop.mapred.TextOutputFormat"
>>> input_format_class = "org.apache.hadoop.mapred.TextInputFormat"
>>> key_class = "org.apache.hadoop.io.IntWritable"
>>> value_class = "org.apache.hadoop.io.Text"
>>> with tempfile.TemporaryDirectory() as d:
... path = os.path.join(d, "old_hadoop_file")
...
... # Write a temporary Hadoop file
... rdd = sc.parallelize([(1, ""), (1, "a"), (3, "x")])
... rdd.saveAsHadoopFile(path, output_format_class, key_class, value_class)
...
... # Load this Hadoop file as an RDD
... loaded = sc.hadoopFile(path, input_format_class, key_class, value_class)
... sorted(loaded.collect())
[(0, '1\\t'), (0, '1\\ta'), (0, '3\\tx')]
"""
jconf = self.ctx._dictToJavaMap(conf)
pickledRDD = self._pickled()
assert self.ctx._jvm is not None
self.ctx._jvm.PythonRDD.saveAsHadoopFile(
pickledRDD._jrdd,
True,
path,
outputFormatClass,
keyClass,
valueClass,
keyConverter,
valueConverter,
jconf,
compressionCodecClass,
)
def saveAsSequenceFile(
self: "RDD[Tuple[K, V]]", path: str, compressionCodecClass: Optional[str] = None
) -> None:
"""
Output a Python RDD of key-value pairs (of form ``RDD[(K, V)]``) to any Hadoop file
system, using the "org.apache.hadoop.io.Writable" types that we convert from the
RDD's key and value types. The mechanism is as follows:
1. Pickle is used to convert pickled Python RDD into RDD of Java objects.
2. Keys and values of this Java RDD are converted to Writables and written out.
.. versionadded:: 1.1.0
Parameters
----------
path : str
path to sequence file
compressionCodecClass : str, optional
fully qualified classname of the compression codec class
i.e. "org.apache.hadoop.io.compress.GzipCodec" (None by default)
See Also
--------
:meth:`SparkContext.sequenceFile`
:meth:`RDD.saveAsHadoopFile`
:meth:`RDD.saveAsNewAPIHadoopFile`
:meth:`RDD.saveAsHadoopDataset`
:meth:`RDD.saveAsNewAPIHadoopDataset`
Examples
--------
>>> import os
>>> import tempfile
>>> with tempfile.TemporaryDirectory() as d:
... path = os.path.join(d, "sequence_file")
...
... # Write a temporary sequence file
... rdd = sc.parallelize([(1, ""), (1, "a"), (3, "x")])
... rdd.saveAsSequenceFile(path)
...
... # Load this sequence file as an RDD
... loaded = sc.sequenceFile(path)
... sorted(loaded.collect())
[(1, ''), (1, 'a'), (3, 'x')]
"""
pickledRDD = self._pickled()
assert self.ctx._jvm is not None
self.ctx._jvm.PythonRDD.saveAsSequenceFile(
pickledRDD._jrdd, True, path, compressionCodecClass
)
def saveAsPickleFile(self, path: str, batchSize: int = 10) -> None:
"""
Save this RDD as a SequenceFile of serialized objects. The serializer
used is :class:`pyspark.serializers.CPickleSerializer`, default batch size
is 10.
.. versionadded:: 1.1.0
Parameters
----------
path : str
path to pickled file
batchSize : int, optional, default 10
the number of Python objects represented as a single Java object.
See Also
--------
:meth:`SparkContext.pickleFile`
Examples
--------
>>> import os
>>> import tempfile
>>> with tempfile.TemporaryDirectory() as d:
... path = os.path.join(d, "pickle_file")
...
... # Write a temporary pickled file
... sc.parallelize(range(10)).saveAsPickleFile(path, 3)
...
... # Load picked file as an RDD
... sorted(sc.pickleFile(path, 3).collect())
[0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
"""
ser: Serializer
if batchSize == 0:
ser = AutoBatchedSerializer(CPickleSerializer())
else:
ser = BatchedSerializer(CPickleSerializer(), batchSize)
self._reserialize(ser)._jrdd.saveAsObjectFile(path)
def saveAsTextFile(self, path: str, compressionCodecClass: Optional[str] = None) -> None:
"""
Save this RDD as a text file, using string representations of elements.
.. versionadded:: 0.7.0
Parameters
----------
path : str
path to text file
compressionCodecClass : str, optional
fully qualified classname of the compression codec class
i.e. "org.apache.hadoop.io.compress.GzipCodec" (None by default)
See Also
--------
:meth:`SparkContext.textFile`
:meth:`SparkContext.wholeTextFiles`
Examples
--------
>>> import os
>>> import tempfile
>>> from fileinput import input
>>> from glob import glob
>>> with tempfile.TemporaryDirectory() as d1:
... path1 = os.path.join(d1, "text_file1")
...
... # Write a temporary text file
... sc.parallelize(range(10)).saveAsTextFile(path1)
...
... # Load text file as an RDD
... ''.join(sorted(input(glob(path1 + "/part-0000*"))))
'0\\n1\\n2\\n3\\n4\\n5\\n6\\n7\\n8\\n9\\n'
Empty lines are tolerated when saving to text files.
>>> with tempfile.TemporaryDirectory() as d2:
... path2 = os.path.join(d2, "text2_file2")
...
... # Write another temporary text file
... sc.parallelize(['', 'foo', '', 'bar', '']).saveAsTextFile(path2)
...
... # Load text file as an RDD
... ''.join(sorted(input(glob(path2 + "/part-0000*"))))
'\\n\\n\\nbar\\nfoo\\n'
Using compressionCodecClass
>>> from fileinput import input, hook_compressed
>>> with tempfile.TemporaryDirectory() as d3:
... path3 = os.path.join(d3, "text3")
... codec = "org.apache.hadoop.io.compress.GzipCodec"
...
... # Write another temporary text file with specified codec
... sc.parallelize(['foo', 'bar']).saveAsTextFile(path3, codec)
...
... # Load text file as an RDD
... result = sorted(input(glob(path3 + "/part*.gz"), openhook=hook_compressed))
... ''.join([r.decode('utf-8') if isinstance(r, bytes) else r for r in result])
'bar\\nfoo\\n'
"""
def func(split: int, iterator: Iterable[Any]) -> Iterable[bytes]:
for x in iterator:
if isinstance(x, bytes):
yield x
elif isinstance(x, str):
yield x.encode("utf-8")
else:
yield str(x).encode("utf-8")
keyed = self.mapPartitionsWithIndex(func)
keyed._bypass_serializer = True # type: ignore[attr-defined]
assert self.ctx._jvm is not None
if compressionCodecClass:
compressionCodec = self.ctx._jvm.java.lang.Class.forName(compressionCodecClass)
keyed._jrdd.map(self.ctx._jvm.BytesToString()).saveAsTextFile(path, compressionCodec)
else:
keyed._jrdd.map(self.ctx._jvm.BytesToString()).saveAsTextFile(path)
# Pair functions
def collectAsMap(self: "RDD[Tuple[K, V]]") -> Dict[K, V]:
"""
Return the key-value pairs in this RDD to the master as a dictionary.
.. versionadded:: 0.7.0
Returns
-------
:class:`dict`
a dictionary of (key, value) pairs
See Also
--------
:meth:`RDD.countByValue`
Notes
-----
This method should only be used if the resulting data is expected
to be small, as all the data is loaded into the driver's memory.
Examples
--------
>>> m = sc.parallelize([(1, 2), (3, 4)]).collectAsMap()
>>> m[1]
2
>>> m[3]
4
"""
return dict(self.collect())
def keys(self: "RDD[Tuple[K, V]]") -> "RDD[K]":
"""
Return an RDD with the keys of each tuple.
.. versionadded:: 0.7.0
Returns
-------
:class:`RDD`
a :class:`RDD` only containing the keys
See Also
--------
:meth:`RDD.values`
Examples
--------
>>> rdd = sc.parallelize([(1, 2), (3, 4)]).keys()
>>> rdd.collect()
[1, 3]
"""
return self.map(lambda x: x[0])
def values(self: "RDD[Tuple[K, V]]") -> "RDD[V]":
"""
Return an RDD with the values of each tuple.
.. versionadded:: 0.7.0
Returns
-------
:class:`RDD`
a :class:`RDD` only containing the values
See Also
--------
:meth:`RDD.keys`
Examples
--------
>>> rdd = sc.parallelize([(1, 2), (3, 4)]).values()
>>> rdd.collect()
[2, 4]
"""
return self.map(lambda x: x[1])
def reduceByKey(
self: "RDD[Tuple[K, V]]",
func: Callable[[V, V], V],
numPartitions: Optional[int] = None,
partitionFunc: Callable[[K], int] = portable_hash,
) -> "RDD[Tuple[K, V]]":
"""
Merge the values for each key using an associative and commutative reduce function.
This will also perform the merging locally on each mapper before
sending results to a reducer, similarly to a "combiner" in MapReduce.
Output will be partitioned with `numPartitions` partitions, or
the default parallelism level if `numPartitions` is not specified.
Default partitioner is hash-partition.
.. versionadded:: 1.6.0
Parameters
----------
func : function
the reduce function
numPartitions : int, optional
the number of partitions in new :class:`RDD`
partitionFunc : function, optional, default `portable_hash`
function to compute the partition index
Returns
-------
:class:`RDD`
a :class:`RDD` containing the keys and the aggregated result for each key
See Also
--------
:meth:`RDD.reduceByKeyLocally`
:meth:`RDD.combineByKey`
:meth:`RDD.aggregateByKey`
:meth:`RDD.foldByKey`
:meth:`RDD.groupByKey`
Examples
--------
>>> from operator import add
>>> rdd = sc.parallelize([("a", 1), ("b", 1), ("a", 1)])
>>> sorted(rdd.reduceByKey(add).collect())
[('a', 2), ('b', 1)]
"""
return self.combineByKey(lambda x: x, func, func, numPartitions, partitionFunc)
def reduceByKeyLocally(self: "RDD[Tuple[K, V]]", func: Callable[[V, V], V]) -> Dict[K, V]:
"""
Merge the values for each key using an associative and commutative reduce function, but
return the results immediately to the master as a dictionary.
This will also perform the merging locally on each mapper before
sending results to a reducer, similarly to a "combiner" in MapReduce.
.. versionadded:: 0.7.0
Parameters
----------
func : function
the reduce function
Returns
-------
dict
a dict containing the keys and the aggregated result for each key
See Also
--------
:meth:`RDD.reduceByKey`
:meth:`RDD.aggregateByKey`
Examples
--------
>>> from operator import add
>>> rdd = sc.parallelize([("a", 1), ("b", 1), ("a", 1)])
>>> sorted(rdd.reduceByKeyLocally(add).items())
[('a', 2), ('b', 1)]
"""
func = fail_on_stopiteration(func)
def reducePartition(iterator: Iterable[Tuple[K, V]]) -> Iterable[Dict[K, V]]:
m: Dict[K, V] = {}
for k, v in iterator:
m[k] = func(m[k], v) if k in m else v
yield m
def mergeMaps(m1: Dict[K, V], m2: Dict[K, V]) -> Dict[K, V]:
for k, v in m2.items():
m1[k] = func(m1[k], v) if k in m1 else v
return m1
return self.mapPartitions(reducePartition).reduce(mergeMaps)
def countByKey(self: "RDD[Tuple[K, V]]") -> Dict[K, int]:
"""
Count the number of elements for each key, and return the result to the
master as a dictionary.
.. versionadded:: 0.7.0
Returns
-------
dict
a dictionary of (key, count) pairs
See Also
--------
:meth:`RDD.collectAsMap`
:meth:`RDD.countByValue`
Examples
--------
>>> rdd = sc.parallelize([("a", 1), ("b", 1), ("a", 1)])
>>> sorted(rdd.countByKey().items())
[('a', 2), ('b', 1)]
"""
return self.map(lambda x: x[0]).countByValue()
def join(
self: "RDD[Tuple[K, V]]",
other: "RDD[Tuple[K, U]]",
numPartitions: Optional[int] = None,
) -> "RDD[Tuple[K, Tuple[V, U]]]":
"""
Return an RDD containing all pairs of elements with matching keys in
`self` and `other`.
Each pair of elements will be returned as a (k, (v1, v2)) tuple, where
(k, v1) is in `self` and (k, v2) is in `other`.
Performs a hash join across the cluster.
.. versionadded:: 0.7.0
Parameters
----------
other : :class:`RDD`
another :class:`RDD`
numPartitions : int, optional
the number of partitions in new :class:`RDD`
Returns
-------
:class:`RDD`
a :class:`RDD` containing all pairs of elements with matching keys
See Also
--------
:meth:`RDD.leftOuterJoin`
:meth:`RDD.rightOuterJoin`
:meth:`RDD.fullOuterJoin`
:meth:`RDD.cogroup`
:meth:`RDD.groupWith`
:meth:`pyspark.sql.DataFrame.join`
Examples
--------
>>> rdd1 = sc.parallelize([("a", 1), ("b", 4)])
>>> rdd2 = sc.parallelize([("a", 2), ("a", 3)])
>>> sorted(rdd1.join(rdd2).collect())
[('a', (1, 2)), ('a', (1, 3))]
"""
return python_join(self, other, numPartitions)
def leftOuterJoin(
self: "RDD[Tuple[K, V]]",
other: "RDD[Tuple[K, U]]",
numPartitions: Optional[int] = None,
) -> "RDD[Tuple[K, Tuple[V, Optional[U]]]]":
"""
Perform a left outer join of `self` and `other`.
For each element (k, v) in `self`, the resulting RDD will either
contain all pairs (k, (v, w)) for w in `other`, or the pair
(k, (v, None)) if no elements in `other` have key k.
Hash-partitions the resulting RDD into the given number of partitions.
.. versionadded:: 0.7.0
Parameters
----------
other : :class:`RDD`
another :class:`RDD`
numPartitions : int, optional
the number of partitions in new :class:`RDD`
Returns
-------
:class:`RDD`
a :class:`RDD` containing all pairs of elements with matching keys
See Also
--------
:meth:`RDD.join`
:meth:`RDD.rightOuterJoin`
:meth:`RDD.fullOuterJoin`
:meth:`pyspark.sql.DataFrame.join`
Examples
--------
>>> rdd1 = sc.parallelize([("a", 1), ("b", 4)])
>>> rdd2 = sc.parallelize([("a", 2)])
>>> sorted(rdd1.leftOuterJoin(rdd2).collect())
[('a', (1, 2)), ('b', (4, None))]
"""
return python_left_outer_join(self, other, numPartitions)
def rightOuterJoin(
self: "RDD[Tuple[K, V]]",
other: "RDD[Tuple[K, U]]",
numPartitions: Optional[int] = None,
) -> "RDD[Tuple[K, Tuple[Optional[V], U]]]":
"""
Perform a right outer join of `self` and `other`.
For each element (k, w) in `other`, the resulting RDD will either
        contain all pairs (k, (v, w)) for v in `self`, or the pair (k, (None, w))
        if no elements in `self` have key k.
Hash-partitions the resulting RDD into the given number of partitions.
.. versionadded:: 0.7.0
Parameters
----------
other : :class:`RDD`
another :class:`RDD`
numPartitions : int, optional
the number of partitions in new :class:`RDD`
Returns
-------
:class:`RDD`
a :class:`RDD` containing all pairs of elements with matching keys
See Also
--------
:meth:`RDD.join`
:meth:`RDD.leftOuterJoin`
:meth:`RDD.fullOuterJoin`
:meth:`pyspark.sql.DataFrame.join`
Examples
--------
>>> rdd1 = sc.parallelize([("a", 1), ("b", 4)])
>>> rdd2 = sc.parallelize([("a", 2)])
>>> sorted(rdd2.rightOuterJoin(rdd1).collect())
[('a', (2, 1)), ('b', (None, 4))]
"""
return python_right_outer_join(self, other, numPartitions)
def fullOuterJoin(
self: "RDD[Tuple[K, V]]",
other: "RDD[Tuple[K, U]]",
numPartitions: Optional[int] = None,
) -> "RDD[Tuple[K, Tuple[Optional[V], Optional[U]]]]":
"""
        Perform a full outer join of `self` and `other`.
For each element (k, v) in `self`, the resulting RDD will either
contain all pairs (k, (v, w)) for w in `other`, or the pair
(k, (v, None)) if no elements in `other` have key k.
Similarly, for each element (k, w) in `other`, the resulting RDD will
either contain all pairs (k, (v, w)) for v in `self`, or the pair
(k, (None, w)) if no elements in `self` have key k.
Hash-partitions the resulting RDD into the given number of partitions.
.. versionadded:: 1.2.0
Parameters
----------
other : :class:`RDD`
another :class:`RDD`
numPartitions : int, optional
the number of partitions in new :class:`RDD`
Returns
-------
:class:`RDD`
a :class:`RDD` containing all pairs of elements with matching keys
See Also
--------
:meth:`RDD.join`
:meth:`RDD.leftOuterJoin`
        :meth:`RDD.rightOuterJoin`
:meth:`pyspark.sql.DataFrame.join`
Examples
--------
>>> rdd1 = sc.parallelize([("a", 1), ("b", 4)])
>>> rdd2 = sc.parallelize([("a", 2), ("c", 8)])
>>> sorted(rdd1.fullOuterJoin(rdd2).collect())
[('a', (1, 2)), ('b', (4, None)), ('c', (None, 8))]
"""
return python_full_outer_join(self, other, numPartitions)
# TODO: add option to control map-side combining
    # portable_hash is used as the default, because the builtin hash of None
    # differs across machines.
def partitionBy(
self: "RDD[Tuple[K, V]]",
numPartitions: Optional[int],
partitionFunc: Callable[[K], int] = portable_hash,
) -> "RDD[Tuple[K, V]]":
"""
Return a copy of the RDD partitioned using the specified partitioner.
.. versionadded:: 0.7.0
Parameters
----------
numPartitions : int, optional
the number of partitions in new :class:`RDD`
partitionFunc : function, optional, default `portable_hash`
function to compute the partition index
Returns
-------
:class:`RDD`
a :class:`RDD` partitioned using the specified partitioner
See Also
--------
:meth:`RDD.repartition`
:meth:`RDD.repartitionAndSortWithinPartitions`
Examples
--------
>>> pairs = sc.parallelize([1, 2, 3, 4, 2, 4, 1]).map(lambda x: (x, x))
>>> sets = pairs.partitionBy(2).glom().collect()
>>> len(set(sets[0]).intersection(set(sets[1])))
0
"""
if numPartitions is None:
numPartitions = self._defaultReducePartitions()
partitioner = Partitioner(numPartitions, partitionFunc)
if self.partitioner == partitioner:
return self
# Transferring O(n) objects to Java is too expensive.
# Instead, we'll form the hash buckets in Python,
# transferring O(numPartitions) objects to Java.
# Each object is a (splitNumber, [objects]) pair.
# In order to avoid too huge objects, the objects are
# grouped into chunks.
outputSerializer = self.ctx._unbatched_serializer
limit = self._memory_limit() / 2
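        # add_shuffle_key turns each partition into a stream of alternating
        # (packed partition id, pickled chunk of (k, v) pairs) records, flushing its
        # buckets early when memory usage or the chunk count grows past the limit.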
def add_shuffle_key(split: int, iterator: Iterable[Tuple[K, V]]) -> Iterable[bytes]:
buckets = defaultdict(list)
c, batch = 0, min(10 * numPartitions, 1000) # type: ignore[operator]
for k, v in iterator:
buckets[partitionFunc(k) % numPartitions].append((k, v)) # type: ignore[operator]
c += 1
# check used memory and avg size of chunk of objects
if c % 1000 == 0 and get_used_memory() > limit or c > batch:
n, size = len(buckets), 0
for split in list(buckets.keys()):
yield pack_long(split)
d = outputSerializer.dumps(buckets[split])
del buckets[split]
yield d
size += len(d)
avg = int(size / n) >> 20
# let 1M < avg < 10M
if avg < 1:
batch = min(sys.maxsize, batch * 1.5) # type: ignore[assignment]
elif avg > 10:
batch = max(int(batch / 1.5), 1)
c = 0
for split, items in buckets.items():
yield pack_long(split)
yield outputSerializer.dumps(items)
keyed = self.mapPartitionsWithIndex(add_shuffle_key, preservesPartitioning=True)
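        # The stream produced above is already raw bytes, so bypass the normal
        # serializer when shipping it to the JVM PairwiseRDD below.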
keyed._bypass_serializer = True # type: ignore[attr-defined]
assert self.ctx._jvm is not None
with SCCallSiteSync(self.context):
pairRDD = self.ctx._jvm.PairwiseRDD(keyed._jrdd.rdd()).asJavaPairRDD()
jpartitioner = self.ctx._jvm.PythonPartitioner(numPartitions, id(partitionFunc))
jrdd = self.ctx._jvm.PythonRDD.valueOfPair(pairRDD.partitionBy(jpartitioner))
rdd: "RDD[Tuple[K, V]]" = RDD(jrdd, self.ctx, BatchedSerializer(outputSerializer))
rdd.partitioner = partitioner
return rdd
# TODO: add control over map-side aggregation
def combineByKey(
self: "RDD[Tuple[K, V]]",
createCombiner: Callable[[V], U],
mergeValue: Callable[[U, V], U],
mergeCombiners: Callable[[U, U], U],
numPartitions: Optional[int] = None,
partitionFunc: Callable[[K], int] = portable_hash,
) -> "RDD[Tuple[K, U]]":
"""
Generic function to combine the elements for each key using a custom
set of aggregation functions.
Turns an RDD[(K, V)] into a result of type RDD[(K, C)], for a "combined
type" C.
To avoid memory allocation, both mergeValue and mergeCombiners are allowed to
modify and return their first argument instead of creating a new C.
In addition, users can control the partitioning of the output RDD.
.. versionadded:: 0.7.0
Parameters
----------
createCombiner : function
            a function to turn a V into a C
mergeValue : function
a function to merge a V into a C
mergeCombiners : function
a function to combine two C's into a single one
numPartitions : int, optional
the number of partitions in new :class:`RDD`
partitionFunc : function, optional, default `portable_hash`
function to compute the partition index
Returns
-------
:class:`RDD`
a :class:`RDD` containing the keys and the aggregated result for each key
See Also
--------
:meth:`RDD.reduceByKey`
:meth:`RDD.aggregateByKey`
:meth:`RDD.foldByKey`
:meth:`RDD.groupByKey`
Notes
-----
V and C can be different -- for example, one might group an RDD of type
(Int, Int) into an RDD of type (Int, List[Int]).
Examples
--------
>>> rdd = sc.parallelize([("a", 1), ("b", 1), ("a", 2)])
>>> def to_list(a):
... return [a]
...
>>> def append(a, b):
... a.append(b)
... return a
...
>>> def extend(a, b):
... a.extend(b)
... return a
...
>>> sorted(rdd.combineByKey(to_list, append, extend).collect())
[('a', [1, 2]), ('b', [1])]
"""
if numPartitions is None:
numPartitions = self._defaultReducePartitions()
serializer = self.ctx.serializer
memory = self._memory_limit()
agg = Aggregator(createCombiner, mergeValue, mergeCombiners)
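        # Two-phase aggregation: combine values map-side within each partition, shuffle
        # the partial combiners, then merge combiners reduce-side. ExternalMerger spills
        # to disk when its in-memory maps exceed the memory limit.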
def combineLocally(iterator: Iterable[Tuple[K, V]]) -> Iterable[Tuple[K, U]]:
merger = ExternalMerger(agg, memory * 0.9, serializer)
merger.mergeValues(iterator)
return merger.items()
locally_combined = self.mapPartitions(combineLocally, preservesPartitioning=True)
shuffled = locally_combined.partitionBy(numPartitions, partitionFunc)
def _mergeCombiners(iterator: Iterable[Tuple[K, U]]) -> Iterable[Tuple[K, U]]:
merger = ExternalMerger(agg, memory, serializer)
merger.mergeCombiners(iterator)
return merger.items()
return shuffled.mapPartitions(_mergeCombiners, preservesPartitioning=True)
def aggregateByKey(
self: "RDD[Tuple[K, V]]",
zeroValue: U,
seqFunc: Callable[[U, V], U],
combFunc: Callable[[U, U], U],
numPartitions: Optional[int] = None,
partitionFunc: Callable[[K], int] = portable_hash,
) -> "RDD[Tuple[K, U]]":
"""
Aggregate the values of each key, using given combine functions and a neutral
"zero value". This function can return a different result type, U, than the type
of the values in this RDD, V. Thus, we need one operation for merging a V into
        a U and one operation for merging two U's. The former operation is used for merging
values within a partition, and the latter is used for merging values between
partitions. To avoid memory allocation, both of these functions are
allowed to modify and return their first argument instead of creating a new U.
.. versionadded:: 1.1.0
Parameters
----------
zeroValue : U
the initial value for the accumulated result of each partition
seqFunc : function
a function to merge a V into a U
combFunc : function
a function to combine two U's into a single one
numPartitions : int, optional
the number of partitions in new :class:`RDD`
partitionFunc : function, optional, default `portable_hash`
function to compute the partition index
Returns
-------
:class:`RDD`
a :class:`RDD` containing the keys and the aggregated result for each key
See Also
--------
:meth:`RDD.reduceByKey`
:meth:`RDD.combineByKey`
:meth:`RDD.foldByKey`
:meth:`RDD.groupByKey`
Examples
--------
>>> rdd = sc.parallelize([("a", 1), ("b", 1), ("a", 2)])
>>> seqFunc = (lambda x, y: (x[0] + y, x[1] + 1))
>>> combFunc = (lambda x, y: (x[0] + y[0], x[1] + y[1]))
>>> sorted(rdd.aggregateByKey((0, 0), seqFunc, combFunc).collect())
[('a', (3, 2)), ('b', (1, 1))]
"""
def createZero() -> U:
return copy.deepcopy(zeroValue)
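        # Implemented on top of combineByKey: each combiner is seeded with a fresh deep
        # copy of zeroValue so that mutable zero values are never shared between keys.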
return self.combineByKey(
lambda v: seqFunc(createZero(), v), seqFunc, combFunc, numPartitions, partitionFunc
)
def foldByKey(
self: "RDD[Tuple[K, V]]",
zeroValue: V,
func: Callable[[V, V], V],
numPartitions: Optional[int] = None,
partitionFunc: Callable[[K], int] = portable_hash,
) -> "RDD[Tuple[K, V]]":
"""
Merge the values for each key using an associative function "func"
and a neutral "zeroValue" which may be added to the result an
arbitrary number of times, and must not change the result
        (e.g., 0 for addition, or 1 for multiplication).
.. versionadded:: 1.1.0
Parameters
----------
zeroValue : V
the initial value for the accumulated result of each partition
func : function
a function to combine two V's into a single one
numPartitions : int, optional
the number of partitions in new :class:`RDD`
partitionFunc : function, optional, default `portable_hash`
function to compute the partition index
Returns
-------
:class:`RDD`
a :class:`RDD` containing the keys and the aggregated result for each key
See Also
--------
:meth:`RDD.reduceByKey`
:meth:`RDD.combineByKey`
:meth:`RDD.aggregateByKey`
:meth:`RDD.groupByKey`
Examples
--------
>>> rdd = sc.parallelize([("a", 1), ("b", 1), ("a", 1)])
>>> from operator import add
>>> sorted(rdd.foldByKey(0, add).collect())
[('a', 2), ('b', 1)]
"""
def createZero() -> V:
return copy.deepcopy(zeroValue)
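        # Like aggregateByKey, but the same function merges a value into an accumulator
        # and merges two accumulators, again seeded with a fresh copy of zeroValue.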
return self.combineByKey(
lambda v: func(createZero(), v), func, func, numPartitions, partitionFunc
)
def _memory_limit(self) -> int:
return _parse_memory(self.ctx._conf.get("spark.python.worker.memory", "512m"))
# TODO: support variant with custom partitioner
def groupByKey(
self: "RDD[Tuple[K, V]]",
numPartitions: Optional[int] = None,
partitionFunc: Callable[[K], int] = portable_hash,
) -> "RDD[Tuple[K, Iterable[V]]]":
"""
Group the values for each key in the RDD into a single sequence.
Hash-partitions the resulting RDD with numPartitions partitions.
.. versionadded:: 0.7.0
Parameters
----------
numPartitions : int, optional
the number of partitions in new :class:`RDD`
partitionFunc : function, optional, default `portable_hash`
function to compute the partition index
Returns
-------
:class:`RDD`
a :class:`RDD` containing the keys and the grouped result for each key
See Also
--------
:meth:`RDD.reduceByKey`
:meth:`RDD.combineByKey`
:meth:`RDD.aggregateByKey`
:meth:`RDD.foldByKey`
Notes
-----
If you are grouping in order to perform an aggregation (such as a
sum or average) over each key, using reduceByKey or aggregateByKey will
provide much better performance.
Examples
--------
>>> rdd = sc.parallelize([("a", 1), ("b", 1), ("a", 1)])
>>> sorted(rdd.groupByKey().mapValues(len).collect())
[('a', 2), ('b', 1)]
>>> sorted(rdd.groupByKey().mapValues(list).collect())
[('a', [1, 1]), ('b', [1])]
"""
def createCombiner(x: V) -> List[V]:
return [x]
def mergeValue(xs: List[V], x: V) -> List[V]:
xs.append(x)
return xs
def mergeCombiners(a: List[V], b: List[V]) -> List[V]:
a.extend(b)
return a
memory = self._memory_limit()
serializer = self._jrdd_deserializer
agg = Aggregator(createCombiner, mergeValue, mergeCombiners)
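        # Same local-combine / shuffle / merge pattern as combineByKey, but the final
        # merge uses ExternalGroupBy, which can spill grouped values to disk, and the
        # grouped lists are wrapped in ResultIterable.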
def combine(iterator: Iterable[Tuple[K, V]]) -> Iterable[Tuple[K, List[V]]]:
merger = ExternalMerger(agg, memory * 0.9, serializer)
merger.mergeValues(iterator)
return merger.items()
locally_combined = self.mapPartitions(combine, preservesPartitioning=True)
shuffled = locally_combined.partitionBy(numPartitions, partitionFunc)
def groupByKey(it: Iterable[Tuple[K, List[V]]]) -> Iterable[Tuple[K, List[V]]]:
merger = ExternalGroupBy(agg, memory, serializer)
merger.mergeCombiners(it)
return merger.items()
return shuffled.mapPartitions(groupByKey, True).mapValues(ResultIterable)
def flatMapValues(
self: "RDD[Tuple[K, V]]", f: Callable[[V], Iterable[U]]
) -> "RDD[Tuple[K, U]]":
"""
Pass each value in the key-value pair RDD through a flatMap function
without changing the keys; this also retains the original RDD's
partitioning.
.. versionadded:: 0.7.0
Parameters
----------
f : function
            a function to turn a V into a sequence of U
Returns
-------
:class:`RDD`
a :class:`RDD` containing the keys and the flat-mapped value
See Also
--------
:meth:`RDD.flatMap`
:meth:`RDD.mapValues`
Examples
--------
>>> rdd = sc.parallelize([("a", ["x", "y", "z"]), ("b", ["p", "r"])])
>>> def f(x): return x
>>> rdd.flatMapValues(f).collect()
[('a', 'x'), ('a', 'y'), ('a', 'z'), ('b', 'p'), ('b', 'r')]
"""
def flat_map_fn(kv: Tuple[K, V]) -> Iterable[Tuple[K, U]]:
return ((kv[0], x) for x in f(kv[1]))
return self.flatMap(flat_map_fn, preservesPartitioning=True)
def mapValues(self: "RDD[Tuple[K, V]]", f: Callable[[V], U]) -> "RDD[Tuple[K, U]]":
"""
Pass each value in the key-value pair RDD through a map function
without changing the keys; this also retains the original RDD's
partitioning.
.. versionadded:: 0.7.0
Parameters
----------
f : function
            a function to turn a V into a U
Returns
-------
:class:`RDD`
a :class:`RDD` containing the keys and the mapped value
See Also
--------
:meth:`RDD.map`
:meth:`RDD.flatMapValues`
Examples
--------
>>> rdd = sc.parallelize([("a", ["apple", "banana", "lemon"]), ("b", ["grapes"])])
>>> def f(x): return len(x)
>>> rdd.mapValues(f).collect()
[('a', 3), ('b', 1)]
"""
def map_values_fn(kv: Tuple[K, V]) -> Tuple[K, U]:
return kv[0], f(kv[1])
return self.map(map_values_fn, preservesPartitioning=True)
@overload
def groupWith(
self: "RDD[Tuple[K, V]]", other: "RDD[Tuple[K, V1]]"
) -> "RDD[Tuple[K, Tuple[ResultIterable[V], ResultIterable[V1]]]]":
...
@overload
def groupWith(
self: "RDD[Tuple[K, V]]", other: "RDD[Tuple[K, V1]]", __o1: "RDD[Tuple[K, V2]]"
) -> "RDD[Tuple[K, Tuple[ResultIterable[V], ResultIterable[V1], ResultIterable[V2]]]]":
...
@overload
def groupWith(
self: "RDD[Tuple[K, V]]",
other: "RDD[Tuple[K, V1]]",
_o1: "RDD[Tuple[K, V2]]",
_o2: "RDD[Tuple[K, V3]]",
) -> """RDD[
Tuple[
K,
Tuple[
ResultIterable[V],
ResultIterable[V1],
ResultIterable[V2],
ResultIterable[V3],
],
]
]""":
...
def groupWith( # type: ignore[misc]
self: "RDD[Tuple[Any, Any]]", other: "RDD[Tuple[Any, Any]]", *others: "RDD[Tuple[Any, Any]]"
) -> "RDD[Tuple[Any, Tuple[ResultIterable[Any], ...]]]":
"""
Alias for cogroup but with support for multiple RDDs.
.. versionadded:: 0.7.0
Parameters
----------
other : :class:`RDD`
another :class:`RDD`
others : :class:`RDD`
other :class:`RDD`\\s
Returns
-------
:class:`RDD`
            a :class:`RDD` containing the keys and cogrouped values
See Also
--------
:meth:`RDD.cogroup`
:meth:`RDD.join`
Examples
--------
>>> rdd1 = sc.parallelize([("a", 5), ("b", 6)])
>>> rdd2 = sc.parallelize([("a", 1), ("b", 4)])
>>> rdd3 = sc.parallelize([("a", 2)])
>>> rdd4 = sc.parallelize([("b", 42)])
>>> [(x, tuple(map(list, y))) for x, y in
... sorted(list(rdd1.groupWith(rdd2, rdd3, rdd4).collect()))]
[('a', ([5], [1], [2], [])), ('b', ([6], [4], [], [42]))]
"""
return python_cogroup((self, other) + others, numPartitions=None)
# TODO: add variant with custom partitioner
def cogroup(
self: "RDD[Tuple[K, V]]",
other: "RDD[Tuple[K, U]]",
numPartitions: Optional[int] = None,
) -> "RDD[Tuple[K, Tuple[ResultIterable[V], ResultIterable[U]]]]":
"""
For each key k in `self` or `other`, return a resulting RDD that
contains a tuple with the list of values for that key in `self` as
well as `other`.
.. versionadded:: 0.7.0
Parameters
----------
other : :class:`RDD`
another :class:`RDD`
Returns
-------
:class:`RDD`
            a :class:`RDD` containing the keys and cogrouped values
See Also
--------
:meth:`RDD.groupWith`
:meth:`RDD.join`
Examples
--------
>>> rdd1 = sc.parallelize([("a", 1), ("b", 4)])
>>> rdd2 = sc.parallelize([("a", 2)])
>>> [(x, tuple(map(list, y))) for x, y in sorted(list(rdd1.cogroup(rdd2).collect()))]
[('a', ([1], [2])), ('b', ([4], []))]
"""
return python_cogroup((self, other), numPartitions)
def sampleByKey(
self: "RDD[Tuple[K, V]]",
withReplacement: bool,
fractions: Dict[K, Union[float, int]],
seed: Optional[int] = None,
) -> "RDD[Tuple[K, V]]":
"""
Return a subset of this RDD sampled by key (via stratified sampling).
Create a sample of this RDD using variable sampling rates for
different keys as specified by fractions, a key to sampling rate map.
.. versionadded:: 0.7.0
Parameters
----------
withReplacement : bool
whether to sample with or without replacement
fractions : dict
map of specific keys to sampling rates
seed : int, optional
seed for the random number generator
Returns
-------
:class:`RDD`
a :class:`RDD` containing the stratified sampling result
See Also
--------
:meth:`RDD.sample`
Examples
--------
>>> fractions = {"a": 0.2, "b": 0.1}
>>> rdd = sc.parallelize(fractions.keys()).cartesian(sc.parallelize(range(0, 1000)))
>>> sample = dict(rdd.sampleByKey(False, fractions, 2).groupByKey().collect())
>>> 100 < len(sample["a"]) < 300 and 50 < len(sample["b"]) < 150
True
>>> max(sample["a"]) <= 999 and min(sample["a"]) >= 0
True
>>> max(sample["b"]) <= 999 and min(sample["b"]) >= 0
True
"""
for fraction in fractions.values():
assert fraction >= 0.0, "Negative fraction value: %s" % fraction
return self.mapPartitionsWithIndex(
RDDStratifiedSampler(withReplacement, fractions, seed).func, True
)
def subtractByKey(
self: "RDD[Tuple[K, V]]",
other: "RDD[Tuple[K, Any]]",
numPartitions: Optional[int] = None,
) -> "RDD[Tuple[K, V]]":
"""
Return each (key, value) pair in `self` that has no pair with matching
key in `other`.
.. versionadded:: 0.9.1
Parameters
----------
other : :class:`RDD`
another :class:`RDD`
numPartitions : int, optional
the number of partitions in new :class:`RDD`
Returns
-------
:class:`RDD`
a :class:`RDD` with the pairs from this whose keys are not in `other`
See Also
--------
:meth:`RDD.subtract`
Examples
--------
>>> rdd1 = sc.parallelize([("a", 1), ("b", 4), ("b", 5), ("a", 2)])
>>> rdd2 = sc.parallelize([("a", 3), ("c", None)])
>>> sorted(rdd1.subtractByKey(rdd2).collect())
[('b', 4), ('b', 5)]
"""
def filter_func(pair: Tuple[K, Tuple[V, Any]]) -> bool:
key, (val1, val2) = pair
return val1 and not val2 # type: ignore[return-value]
return (
self.cogroup(other, numPartitions)
.filter(filter_func) # type: ignore[arg-type]
.flatMapValues(lambda x: x[0])
)
def subtract(self: "RDD[T]", other: "RDD[T]", numPartitions: Optional[int] = None) -> "RDD[T]":
"""
Return each value in `self` that is not contained in `other`.
.. versionadded:: 0.9.1
Parameters
----------
other : :class:`RDD`
another :class:`RDD`
numPartitions : int, optional
the number of partitions in new :class:`RDD`
Returns
-------
:class:`RDD`
a :class:`RDD` with the elements from this that are not in `other`
See Also
--------
:meth:`RDD.subtractByKey`
Examples
--------
>>> rdd1 = sc.parallelize([("a", 1), ("b", 4), ("b", 5), ("a", 3)])
>>> rdd2 = sc.parallelize([("a", 3), ("c", None)])
>>> sorted(rdd1.subtract(rdd2).collect())
[('a', 1), ('b', 4), ('b', 5)]
"""
# note: here 'True' is just a placeholder
rdd = other.map(lambda x: (x, True))
return self.map(lambda x: (x, True)).subtractByKey(rdd, numPartitions).keys()
def keyBy(self: "RDD[T]", f: Callable[[T], K]) -> "RDD[Tuple[K, T]]":
"""
Creates tuples of the elements in this RDD by applying `f`.
.. versionadded:: 0.9.1
Parameters
----------
f : function
a function to compute the key
Returns
-------
:class:`RDD`
            a :class:`RDD` containing the (key, element) pairs obtained by applying `f`
See Also
--------
:meth:`RDD.map`
:meth:`RDD.keys`
:meth:`RDD.values`
Examples
--------
>>> rdd1 = sc.parallelize(range(0,3)).keyBy(lambda x: x*x)
>>> rdd2 = sc.parallelize(zip(range(0,5), range(0,5)))
>>> [(x, list(map(list, y))) for x, y in sorted(rdd1.cogroup(rdd2).collect())]
[(0, [[0], [0]]), (1, [[1], [1]]), (2, [[], [2]]), (3, [[], [3]]), (4, [[2], [4]])]
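
        Each element is paired with the key computed from it:

        >>> rdd1.collect()
        [(0, 0), (1, 1), (4, 2)]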
"""
return self.map(lambda x: (f(x), x))
def repartition(self: "RDD[T]", numPartitions: int) -> "RDD[T]":
"""
Return a new RDD that has exactly numPartitions partitions.
Can increase or decrease the level of parallelism in this RDD.
Internally, this uses a shuffle to redistribute data.
If you are decreasing the number of partitions in this RDD, consider
using `coalesce`, which can avoid performing a shuffle.
.. versionadded:: 1.0.0
Parameters
----------
        numPartitions : int
the number of partitions in new :class:`RDD`
Returns
-------
:class:`RDD`
a :class:`RDD` with exactly numPartitions partitions
See Also
--------
:meth:`RDD.coalesce`
:meth:`RDD.partitionBy`
:meth:`RDD.repartitionAndSortWithinPartitions`
Examples
--------
>>> rdd = sc.parallelize([1,2,3,4,5,6,7], 4)
>>> sorted(rdd.glom().collect())
[[1], [2, 3], [4, 5], [6, 7]]
>>> len(rdd.repartition(2).glom().collect())
2
>>> len(rdd.repartition(10).glom().collect())
10
"""
return self.coalesce(numPartitions, shuffle=True)
def coalesce(self: "RDD[T]", numPartitions: int, shuffle: bool = False) -> "RDD[T]":
"""
Return a new RDD that is reduced into `numPartitions` partitions.
.. versionadded:: 1.0.0
Parameters
----------
        numPartitions : int
the number of partitions in new :class:`RDD`
shuffle : bool, optional, default False
whether to add a shuffle step
Returns
-------
:class:`RDD`
a :class:`RDD` that is reduced into `numPartitions` partitions
See Also
--------
:meth:`RDD.repartition`
Examples
--------
>>> sc.parallelize([1, 2, 3, 4, 5], 3).glom().collect()
[[1], [2, 3], [4, 5]]
>>> sc.parallelize([1, 2, 3, 4, 5], 3).coalesce(1).glom().collect()
[[1, 2, 3, 4, 5]]
"""
if not numPartitions > 0:
raise ValueError("Number of partitions must be positive.")
if shuffle:
# Decrease the batch size in order to distribute evenly the elements across output
# partitions. Otherwise, repartition will possibly produce highly skewed partitions.
batchSize = min(10, self.ctx._batchSize or 1024)
ser = BatchedSerializer(CPickleSerializer(), batchSize)
selfCopy = self._reserialize(ser)
jrdd_deserializer = selfCopy._jrdd_deserializer
jrdd = selfCopy._jrdd.coalesce(numPartitions, shuffle)
else:
jrdd_deserializer = self._jrdd_deserializer
jrdd = self._jrdd.coalesce(numPartitions, shuffle)
return RDD(jrdd, self.ctx, jrdd_deserializer)
def zip(self: "RDD[T]", other: "RDD[U]") -> "RDD[Tuple[T, U]]":
"""
        Zips this RDD with another one, returning key-value pairs consisting of the
        first element in each RDD, the second element in each RDD, etc. Assumes
that the two RDDs have the same number of partitions and the same
number of elements in each partition (e.g. one was made through
a map on the other).
.. versionadded:: 1.0.0
Parameters
----------
other : :class:`RDD`
another :class:`RDD`
Returns
-------
:class:`RDD`
a :class:`RDD` containing the zipped key-value pairs
See Also
--------
:meth:`RDD.zipWithIndex`
:meth:`RDD.zipWithUniqueId`
Examples
--------
>>> rdd1 = sc.parallelize(range(0,5))
>>> rdd2 = sc.parallelize(range(1000, 1005))
>>> rdd1.zip(rdd2).collect()
[(0, 1000), (1, 1001), (2, 1002), (3, 1003), (4, 1004)]
"""
def get_batch_size(ser: Serializer) -> int:
if isinstance(ser, BatchedSerializer):
return ser.batchSize
return 1 # not batched
def batch_as(rdd: "RDD[V]", batchSize: int) -> "RDD[V]":
return rdd._reserialize(BatchedSerializer(CPickleSerializer(), batchSize))
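        # Both RDDs must use the same fixed batch size so that corresponding serialized
        # batches contain the same number of elements and can be paired up after the
        # JVM-side zip.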
my_batch = get_batch_size(self._jrdd_deserializer)
other_batch = get_batch_size(other._jrdd_deserializer)
if my_batch != other_batch or not my_batch:
# use the smallest batchSize for both of them
batchSize = min(my_batch, other_batch)
if batchSize <= 0:
# auto batched or unlimited
batchSize = 100
other = batch_as(other, batchSize)
self = batch_as(self, batchSize)
if self.getNumPartitions() != other.getNumPartitions():
raise ValueError("Can only zip with RDD which has the same number of partitions")
# There will be an Exception in JVM if there are different number
# of items in each partitions.
pairRDD = self._jrdd.zip(other._jrdd)
deserializer = PairDeserializer(self._jrdd_deserializer, other._jrdd_deserializer)
return RDD(pairRDD, self.ctx, deserializer)
def zipWithIndex(self: "RDD[T]") -> "RDD[Tuple[T, int]]":
"""
Zips this RDD with its element indices.
The ordering is first based on the partition index and then the
ordering of items within each partition. So the first item in
the first partition gets index 0, and the last item in the last
partition receives the largest index.
This method needs to trigger a spark job when this RDD contains
        more than one partition.
.. versionadded:: 1.2.0
Returns
-------
:class:`RDD`
a :class:`RDD` containing the zipped key-index pairs
See Also
--------
:meth:`RDD.zip`
:meth:`RDD.zipWithUniqueId`
Examples
--------
>>> sc.parallelize(["a", "b", "c", "d"], 3).zipWithIndex().collect()
[('a', 0), ('b', 1), ('c', 2), ('d', 3)]
"""
starts = [0]
if self.getNumPartitions() > 1:
nums = self.mapPartitions(lambda it: [sum(1 for i in it)]).collect()
for i in range(len(nums) - 1):
starts.append(starts[-1] + nums[i])
def func(k: int, it: Iterable[T]) -> Iterable[Tuple[T, int]]:
for i, v in enumerate(it, starts[k]):
yield v, i
return self.mapPartitionsWithIndex(func)
def zipWithUniqueId(self: "RDD[T]") -> "RDD[Tuple[T, int]]":
"""
Zips this RDD with generated unique Long ids.
Items in the kth partition will get ids k, n+k, 2*n+k, ..., where
n is the number of partitions. So there may exist gaps, but this
method won't trigger a spark job, which is different from
:meth:`zipWithIndex`.
.. versionadded:: 1.2.0
Returns
-------
:class:`RDD`
a :class:`RDD` containing the zipped key-UniqueId pairs
See Also
--------
:meth:`RDD.zip`
:meth:`RDD.zipWithIndex`
Examples
--------
>>> sc.parallelize(["a", "b", "c", "d", "e"], 3).zipWithUniqueId().collect()
[('a', 0), ('b', 1), ('c', 4), ('d', 2), ('e', 5)]
"""
n = self.getNumPartitions()
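        # Element i of partition k receives id i * n + k, which is unique because ids
        # from different partitions fall in different residue classes modulo n.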
def func(k: int, it: Iterable[T]) -> Iterable[Tuple[T, int]]:
for i, v in enumerate(it):
yield v, i * n + k
return self.mapPartitionsWithIndex(func)
def name(self) -> Optional[str]:
"""
Return the name of this RDD.
.. versionadded:: 1.0.0
Returns
-------
str
:class:`RDD` name
See Also
--------
:meth:`RDD.setName`
Examples
--------
>>> rdd = sc.range(5)
>>> rdd.name() == None
True
"""
n = self._jrdd.name()
return n if n else None
def setName(self: "RDD[T]", name: str) -> "RDD[T]":
"""
Assign a name to this RDD.
.. versionadded:: 1.0.0
Parameters
----------
name : str
new name
Returns
-------
:class:`RDD`
the same :class:`RDD` with name updated
See Also
--------
:meth:`RDD.name`
Examples
--------
>>> rdd = sc.parallelize([1, 2])
>>> rdd.setName('I am an RDD').name()
'I am an RDD'
"""
self._jrdd.setName(name)
return self
def toDebugString(self) -> Optional[bytes]:
"""
A description of this RDD and its recursive dependencies for debugging.
.. versionadded:: 1.0.0
Returns
-------
bytes
debugging information of this :class:`RDD`
Examples
--------
>>> rdd = sc.range(5)
>>> rdd.toDebugString()
b'...PythonRDD...ParallelCollectionRDD...'
"""
debug_string = self._jrdd.toDebugString()
return debug_string.encode("utf-8") if debug_string else None
def getStorageLevel(self) -> StorageLevel:
"""
Get the RDD's current storage level.
.. versionadded:: 1.0.0
Returns
-------
:class:`StorageLevel`
current :class:`StorageLevel`
See Also
--------
:meth:`RDD.name`
Examples
--------
>>> rdd = sc.parallelize([1,2])
>>> rdd.getStorageLevel()
StorageLevel(False, False, False, False, 1)
>>> print(rdd.getStorageLevel())
Serialized 1x Replicated
"""
java_storage_level = self._jrdd.getStorageLevel()
storage_level = StorageLevel(
java_storage_level.useDisk(),
java_storage_level.useMemory(),
java_storage_level.useOffHeap(),
java_storage_level.deserialized(),
java_storage_level.replication(),
)
return storage_level
def _defaultReducePartitions(self) -> int:
"""
Returns the default number of partitions to use during reduce tasks (e.g., groupBy).
If spark.default.parallelism is set, then we'll use the value from SparkContext
defaultParallelism, otherwise we'll use the number of partitions in this RDD.
This mirrors the behavior of the Scala Partitioner#defaultPartitioner, intended to reduce
the likelihood of OOMs. Once PySpark adopts Partitioner-based APIs, this behavior will
be inherent.
"""
if self.ctx._conf.contains("spark.default.parallelism"):
return self.ctx.defaultParallelism
else:
return self.getNumPartitions()
def lookup(self: "RDD[Tuple[K, V]]", key: K) -> List[V]:
"""
Return the list of values in the RDD for key `key`. This operation
is done efficiently if the RDD has a known partitioner by only
searching the partition that the key maps to.
.. versionadded:: 1.2.0
Parameters
----------
key : K
the key to look up
Returns
-------
list
the list of values in the :class:`RDD` for key `key`
Examples
--------
>>> l = range(1000)
>>> rdd = sc.parallelize(zip(l, l), 10)
>>> rdd.lookup(42) # slow
[42]
>>> sorted = rdd.sortByKey()
>>> sorted.lookup(42) # fast
[42]
>>> sorted.lookup(1024)
[]
>>> rdd2 = sc.parallelize([(('a', 'b'), 'c')]).groupByKey()
>>> list(rdd2.lookup(('a', 'b'))[0])
['c']
"""
values = self.filter(lambda kv: kv[0] == key).values()
if self.partitioner is not None:
return self.ctx.runJob(values, lambda x: x, [self.partitioner(key)])
return values.collect()
def _to_java_object_rdd(self) -> "JavaObject":
"""Return a JavaRDD of Object by unpickling
        It will convert each Python object into a Java object via Pickle, whether or not
        the RDD is serialized in batches.
"""
rdd = self._pickled()
assert self.ctx._jvm is not None
return self.ctx._jvm.SerDeUtil.pythonToJava(rdd._jrdd, True)
def countApprox(self, timeout: int, confidence: float = 0.95) -> int:
"""
Approximate version of count() that returns a potentially incomplete
result within a timeout, even if not all tasks have finished.
.. versionadded:: 1.2.0
Parameters
----------
timeout : int
maximum time to wait for the job, in milliseconds
confidence : float
the desired statistical confidence in the result
Returns
-------
int
a potentially incomplete result, with error bounds
See Also
--------
:meth:`RDD.count`
Examples
--------
>>> rdd = sc.parallelize(range(1000), 10)
>>> rdd.countApprox(1000, 1.0)
1000
"""
drdd = self.mapPartitions(lambda it: [float(sum(1 for i in it))])
return int(drdd.sumApprox(timeout, confidence))
def sumApprox(
self: "RDD[Union[float, int]]", timeout: int, confidence: float = 0.95
) -> BoundedFloat:
"""
Approximate operation to return the sum within a timeout
or meet the confidence.
.. versionadded:: 1.2.0
Parameters
----------
timeout : int
maximum time to wait for the job, in milliseconds
confidence : float
the desired statistical confidence in the result
Returns
-------
:class:`BoundedFloat`
a potentially incomplete result, with error bounds
See Also
--------
:meth:`RDD.sum`
Examples
--------
>>> rdd = sc.parallelize(range(1000), 10)
>>> r = sum(range(1000))
>>> abs(rdd.sumApprox(1000) - r) / r < 0.05
True
"""
jrdd = self.mapPartitions(lambda it: [float(sum(it))])._to_java_object_rdd()
assert self.ctx._jvm is not None
jdrdd = self.ctx._jvm.JavaDoubleRDD.fromRDD(jrdd.rdd())
r = jdrdd.sumApprox(timeout, confidence).getFinalValue()
return BoundedFloat(r.mean(), r.confidence(), r.low(), r.high())
def meanApprox(
self: "RDD[Union[float, int]]", timeout: int, confidence: float = 0.95
) -> BoundedFloat:
"""
Approximate operation to return the mean within a timeout
or meet the confidence.
.. versionadded:: 1.2.0
Parameters
----------
timeout : int
maximum time to wait for the job, in milliseconds
confidence : float
the desired statistical confidence in the result
Returns
-------
:class:`BoundedFloat`
a potentially incomplete result, with error bounds
See Also
--------
:meth:`RDD.mean`
Examples
--------
>>> rdd = sc.parallelize(range(1000), 10)
>>> r = sum(range(1000)) / 1000.0
>>> abs(rdd.meanApprox(1000) - r) / r < 0.05
True
"""
jrdd = self.map(float)._to_java_object_rdd()
assert self.ctx._jvm is not None
jdrdd = self.ctx._jvm.JavaDoubleRDD.fromRDD(jrdd.rdd())
r = jdrdd.meanApprox(timeout, confidence).getFinalValue()
return BoundedFloat(r.mean(), r.confidence(), r.low(), r.high())
def countApproxDistinct(self: "RDD[T]", relativeSD: float = 0.05) -> int:
"""
Return approximate number of distinct elements in the RDD.
.. versionadded:: 1.2.0
Parameters
----------
relativeSD : float, optional
Relative accuracy. Smaller values create
counters that require more space.
It must be greater than 0.000017.
Returns
-------
int
approximate number of distinct elements
See Also
--------
:meth:`RDD.distinct`
Notes
-----
The algorithm used is based on streamlib's implementation of
`"HyperLogLog in Practice: Algorithmic Engineering of a State
of The Art Cardinality Estimation Algorithm", available here
<https://doi.org/10.1145/2452376.2452456>`_.
Examples
--------
>>> n = sc.parallelize(range(1000)).map(str).countApproxDistinct()
>>> 900 < n < 1100
True
>>> n = sc.parallelize([i % 20 for i in range(1000)]).countApproxDistinct()
>>> 16 < n < 24
True
"""
if relativeSD < 0.000017:
raise ValueError("relativeSD should be greater than 0.000017")
# the hash space in Java is 2^32
hashRDD = self.map(lambda x: portable_hash(x) & 0xFFFFFFFF)
return hashRDD._to_java_object_rdd().countApproxDistinct(relativeSD)
def toLocalIterator(self: "RDD[T]", prefetchPartitions: bool = False) -> Iterator[T]:
"""
Return an iterator that contains all of the elements in this RDD.
The iterator will consume as much memory as the largest partition in this RDD.
With prefetch it may consume up to the memory of the 2 largest partitions.
.. versionadded:: 1.3.0
Parameters
----------
prefetchPartitions : bool, optional
If Spark should pre-fetch the next partition
before it is needed.
Returns
-------
:class:`collections.abc.Iterator`
an iterator that contains all of the elements in this :class:`RDD`
See Also
--------
:meth:`RDD.collect`
:meth:`pyspark.sql.DataFrame.toLocalIterator`
Examples
--------
>>> rdd = sc.parallelize(range(10))
>>> [x for x in rdd.toLocalIterator()]
[0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
"""
assert self.ctx._jvm is not None
with SCCallSiteSync(self.context):
sock_info = self.ctx._jvm.PythonRDD.toLocalIteratorAndServe(
self._jrdd.rdd(), prefetchPartitions
)
return _local_iterator_from_socket(sock_info, self._jrdd_deserializer)
def barrier(self: "RDD[T]") -> "RDDBarrier[T]":
"""
Marks the current stage as a barrier stage, where Spark must launch all tasks together.
In case of a task failure, instead of only restarting the failed task, Spark will abort the
entire stage and relaunch all tasks for this stage.
The barrier execution mode feature is experimental and it only handles limited scenarios.
Please read the linked SPIP and design docs to understand the limitations and future plans.
.. versionadded:: 2.4.0
Returns
-------
:class:`RDDBarrier`
instance that provides actions within a barrier stage.
See Also
--------
:class:`pyspark.BarrierTaskContext`
Notes
-----
For additional information see
- `SPIP: Barrier Execution Mode <http://jira.apache.org/jira/browse/SPARK-24374>`_
- `Design Doc <https://jira.apache.org/jira/browse/SPARK-24582>`_
This API is experimental
"""
return RDDBarrier(self)
def _is_barrier(self) -> bool:
"""
Whether this RDD is in a barrier stage.
"""
return self._jrdd.rdd().isBarrier()
def withResources(self: "RDD[T]", profile: ResourceProfile) -> "RDD[T]":
"""
Specify a :class:`pyspark.resource.ResourceProfile` to use when calculating this RDD.
This is only supported on certain cluster managers and currently requires dynamic
allocation to be enabled. It will result in new executors with the resources specified
being acquired to calculate the RDD.
.. versionadded:: 3.1.0
Parameters
----------
profile : :class:`pyspark.resource.ResourceProfile`
a resource profile
Returns
-------
:class:`RDD`
the same :class:`RDD` with user specified profile
See Also
--------
:meth:`RDD.getResourceProfile`
Notes
-----
This API is experimental
"""
self.has_resource_profile = True
if profile._java_resource_profile is not None:
jrp = profile._java_resource_profile
else:
assert self.ctx._jvm is not None
builder = self.ctx._jvm.org.apache.spark.resource.ResourceProfileBuilder()
ereqs = ExecutorResourceRequests(self.ctx._jvm, profile._executor_resource_requests)
treqs = TaskResourceRequests(self.ctx._jvm, profile._task_resource_requests)
builder.require(ereqs._java_executor_resource_requests)
builder.require(treqs._java_task_resource_requests)
jrp = builder.build()
self._jrdd.withResources(jrp)
return self
def getResourceProfile(self) -> Optional[ResourceProfile]:
"""
Get the :class:`pyspark.resource.ResourceProfile` specified with this RDD or None
if it wasn't specified.
.. versionadded:: 3.1.0
Returns
-------
        :class:`pyspark.resource.ResourceProfile`
            The user-specified profile, or None if none was specified
See Also
--------
:meth:`RDD.withResources`
Notes
-----
This API is experimental
"""
rp = self._jrdd.getResourceProfile()
if rp is not None:
return ResourceProfile(_java_resource_profile=rp)
else:
return None
@overload
def toDF(
self: "RDD[RowLike]",
schema: Optional[Union[List[str], Tuple[str, ...]]] = None,
sampleRatio: Optional[float] = None,
) -> "DataFrame":
...
@overload
def toDF(
self: "RDD[RowLike]", schema: Optional[Union["StructType", str]] = None
) -> "DataFrame":
...
@overload
def toDF(
self: "RDD[AtomicValue]",
schema: Union["AtomicType", str],
) -> "DataFrame":
...
def toDF(
self: "RDD[Any]", schema: Optional[Any] = None, sampleRatio: Optional[float] = None
) -> "DataFrame":
raise RuntimeError("""RDD.toDF was called before SparkSession was initialized.""")
def _prepare_for_python_RDD(sc: "SparkContext", command: Any) -> Tuple[bytes, Any, Any, Any]:
# the serialized command will be compressed by broadcast
ser = CloudPickleSerializer()
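    # CloudPickle is used because the command typically contains closures and lambdas,
    # which the standard pickle module cannot serialize.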
pickled_command = ser.dumps(command)
assert sc._jvm is not None
if len(pickled_command) > sc._jvm.PythonUtils.getBroadcastThreshold(sc._jsc): # Default 1M
        # The broadcast will have the same life cycle as the created PythonRDD
broadcast = sc.broadcast(pickled_command)
pickled_command = ser.dumps(broadcast)
broadcast_vars = [x._jbroadcast for x in sc._pickled_broadcast_vars]
sc._pickled_broadcast_vars.clear()
return pickled_command, broadcast_vars, sc.environment, sc._python_includes
def _wrap_function(
sc: "SparkContext", func: Callable, deserializer: Any, serializer: Any, profiler: Any = None
) -> "JavaObject":
assert deserializer, "deserializer should not be empty"
assert serializer, "serializer should not be empty"
command = (func, profiler, deserializer, serializer)
pickled_command, broadcast_vars, env, includes = _prepare_for_python_RDD(sc, command)
assert sc._jvm is not None
return sc._jvm.SimplePythonFunction(
bytearray(pickled_command),
env,
includes,
sc.pythonExec,
sc.pythonVer,
broadcast_vars,
sc._javaAccumulator,
)
class RDDBarrier(Generic[T]):
"""
Wraps an RDD in a barrier stage, which forces Spark to launch tasks of this stage together.
:class:`RDDBarrier` instances are created by :meth:`RDD.barrier`.
.. versionadded:: 2.4.0
Notes
-----
This API is experimental
"""
def __init__(self, rdd: RDD[T]):
self.rdd = rdd
def mapPartitions(
self, f: Callable[[Iterable[T]], Iterable[U]], preservesPartitioning: bool = False
) -> RDD[U]:
"""
Returns a new RDD by applying a function to each partition of the wrapped RDD,
where tasks are launched together in a barrier stage.
The interface is the same as :meth:`RDD.mapPartitions`.
Please see the API doc there.
.. versionadded:: 2.4.0
Parameters
----------
f : function
a function to run on each partition of the RDD
preservesPartitioning : bool, optional, default False
            indicates whether the input function preserves the partitioner,
            which should be False unless this is a pair RDD and the input
            function doesn't modify the keys
Returns
-------
:class:`RDD`
a new :class:`RDD` by applying a function to each partition
See Also
--------
:meth:`RDD.mapPartitions`
Notes
-----
This API is experimental
Examples
--------
>>> rdd = sc.parallelize([1, 2, 3, 4], 2)
>>> def f(iterator): yield sum(iterator)
>>> barrier = rdd.barrier()
>>> barrier
<pyspark.rdd.RDDBarrier ...>
>>> barrier.mapPartitions(f).collect()
[3, 7]
"""
def func(s: int, iterator: Iterable[T]) -> Iterable[U]:
return f(iterator)
return PipelinedRDD(self.rdd, func, preservesPartitioning, isFromBarrier=True)
def mapPartitionsWithIndex(
self,
f: Callable[[int, Iterable[T]], Iterable[U]],
preservesPartitioning: bool = False,
) -> RDD[U]:
"""
Returns a new RDD by applying a function to each partition of the wrapped RDD, while
tracking the index of the original partition. And all tasks are launched together
in a barrier stage.
The interface is the same as :meth:`RDD.mapPartitionsWithIndex`.
Please see the API doc there.
.. versionadded:: 3.0.0
Parameters
----------
f : function
a function to run on each partition of the RDD
preservesPartitioning : bool, optional, default False
            indicates whether the input function preserves the partitioner,
            which should be False unless this is a pair RDD and the input
            function doesn't modify the keys
Returns
-------
:class:`RDD`
a new :class:`RDD` by applying a function to each partition
See Also
--------
:meth:`RDD.mapPartitionsWithIndex`
Notes
-----
This API is experimental
Examples
--------
>>> rdd = sc.parallelize([1, 2, 3, 4], 4)
>>> def f(splitIndex, iterator): yield splitIndex
>>> barrier = rdd.barrier()
>>> barrier
<pyspark.rdd.RDDBarrier ...>
>>> barrier.mapPartitionsWithIndex(f).sum()
6
"""
return PipelinedRDD(self.rdd, f, preservesPartitioning, isFromBarrier=True)
class PipelinedRDD(RDD[U], Generic[T, U]):
"""
Examples
--------
Pipelined maps:
>>> rdd = sc.parallelize([1, 2, 3, 4])
>>> rdd.map(lambda x: 2 * x).cache().map(lambda x: 2 * x).collect()
[4, 8, 12, 16]
>>> rdd.map(lambda x: 2 * x).map(lambda x: 2 * x).collect()
[4, 8, 12, 16]
Pipelined reduces:
>>> from operator import add
>>> rdd.map(lambda x: 2 * x).reduce(add)
20
>>> rdd.flatMap(lambda x: [x, x]).reduce(add)
20
"""
def __init__(
self,
prev: RDD[T],
func: Callable[[int, Iterable[T]], Iterable[U]],
preservesPartitioning: bool = False,
isFromBarrier: bool = False,
):
if not isinstance(prev, PipelinedRDD) or not prev._is_pipelinable():
# This transformation is the first in its stage:
self.func = func
self.preservesPartitioning = preservesPartitioning
self._prev_jrdd = prev._jrdd
self._prev_jrdd_deserializer = prev._jrdd_deserializer
else:
prev_func: Callable[[int, Iterable[V]], Iterable[T]] = prev.func
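            # Compose this function with the previous stage's function so that both run
            # in a single Python worker pass instead of materializing an intermediate
            # RDD between them.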
def pipeline_func(split: int, iterator: Iterable[V]) -> Iterable[U]:
return func(split, prev_func(split, iterator))
self.func = pipeline_func
self.preservesPartitioning = prev.preservesPartitioning and preservesPartitioning
self._prev_jrdd = prev._prev_jrdd # maintain the pipeline
self._prev_jrdd_deserializer = prev._prev_jrdd_deserializer
self.is_cached = False
self.has_resource_profile = False
self.is_checkpointed = False
self.ctx = prev.ctx
self.prev = prev
self._jrdd_val: Optional["JavaObject"] = None
self._id = None
self._jrdd_deserializer = self.ctx.serializer
self._bypass_serializer = False
self.partitioner = prev.partitioner if self.preservesPartitioning else None
self.is_barrier = isFromBarrier or prev._is_barrier()
def getNumPartitions(self) -> int:
return self._prev_jrdd.partitions().size()
@property
def _jrdd(self) -> "JavaObject":
if self._jrdd_val:
return self._jrdd_val
if self._bypass_serializer:
self._jrdd_deserializer = NoOpSerializer()
if self.ctx.profiler_collector:
profiler = self.ctx.profiler_collector.new_profiler(self.ctx)
else:
profiler = None
wrapped_func = _wrap_function(
self.ctx, self.func, self._prev_jrdd_deserializer, self._jrdd_deserializer, profiler
)
assert self.ctx._jvm is not None
python_rdd = self.ctx._jvm.PythonRDD(
self._prev_jrdd.rdd(), wrapped_func, self.preservesPartitioning, self.is_barrier
)
self._jrdd_val = python_rdd.asJavaRDD()
if profiler:
assert self._jrdd_val is not None
self._id = self._jrdd_val.id()
self.ctx.profiler_collector.add_profiler(self._id, profiler)
return self._jrdd_val
def id(self) -> int:
if self._id is None:
self._id = self._jrdd.id()
return self._id
def _is_pipelinable(self) -> bool:
return not (self.is_cached or self.is_checkpointed or self.has_resource_profile)
def _is_barrier(self) -> bool:
return self.is_barrier
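# Illustrative sketch (not part of the original module): because of the
# pipelining above, two chained narrow transformations are fused into a single
# Python function, so each partition is traversed only once, e.g.
#
#   rdd.map(lambda x: x + 1).map(lambda x: x * 2)
#
# builds one PipelinedRDD whose ``func`` behaves roughly like
#
#   def pipelined(split, iterator):
#       return ((y + 1) * 2 for y in iterator)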
def _test() -> None:
import doctest
import tempfile
from pyspark.context import SparkContext
tmp_dir = tempfile.TemporaryDirectory()
globs = globals().copy()
# The small batch size here ensures that we see multiple batches,
# even in these small test examples:
globs["sc"] = SparkContext("local[4]", "PythonTest")
globs["sc"].setCheckpointDir(tmp_dir.name)
(failure_count, test_count) = doctest.testmod(globs=globs, optionflags=doctest.ELLIPSIS)
globs["sc"].stop()
tmp_dir.cleanup()
if failure_count:
tmp_dir.cleanup()
sys.exit(-1)
if __name__ == "__main__":
_test()
| {
"content_hash": "40d6f6ff0fd56271ef4b19df575b2b42",
"timestamp": "",
"source": "github",
"line_count": 5469,
"max_line_length": 100,
"avg_line_length": 31.92887182300238,
"alnum_prop": 0.5409376986467681,
"repo_name": "shaneknapp/spark",
"id": "5f4f4d494e13c476b242d2e2307bf21a11dbd644",
"size": "175404",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "python/pyspark/rdd.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "ANTLR",
"bytes": "60021"
},
{
"name": "Batchfile",
"bytes": "27482"
},
{
"name": "C",
"bytes": "1493"
},
{
"name": "CSS",
"bytes": "26338"
},
{
"name": "Dockerfile",
"bytes": "16252"
},
{
"name": "HTML",
"bytes": "42080"
},
{
"name": "HiveQL",
"bytes": "1859465"
},
{
"name": "Java",
"bytes": "4736955"
},
{
"name": "JavaScript",
"bytes": "223014"
},
{
"name": "Jupyter Notebook",
"bytes": "4310512"
},
{
"name": "Makefile",
"bytes": "2379"
},
{
"name": "PLpgSQL",
"bytes": "352609"
},
{
"name": "PowerShell",
"bytes": "4221"
},
{
"name": "Python",
"bytes": "8368428"
},
{
"name": "R",
"bytes": "1287401"
},
{
"name": "ReScript",
"bytes": "240"
},
{
"name": "Roff",
"bytes": "32632"
},
{
"name": "Scala",
"bytes": "44294294"
},
{
"name": "Shell",
"bytes": "245444"
},
{
"name": "Thrift",
"bytes": "2016"
},
{
"name": "q",
"bytes": "111129"
}
],
"symlink_target": ""
} |
"""Package that handles non-debug, non-file output for run-webkit-tests."""
import math
import optparse
from webkitpy.tool import grammar
from webkitpy.layout_tests.models import test_expectations
from webkitpy.layout_tests.models.test_expectations import TestExpectations, TestExpectationParser
from webkitpy.layout_tests.views.metered_stream import MeteredStream
NUM_SLOW_TESTS_TO_LOG = 10
def print_options():
return [
optparse.make_option('-q', '--quiet', action='store_true', default=False,
help='run quietly (errors, warnings, and progress only)'),
optparse.make_option('--timing', action='store_true', default=False,
help='display test times (summary plus per-test w/ --verbose)'),
optparse.make_option('-v', '--verbose', action='store_true', default=False,
help='print a summarized result for every test (one line per test)'),
optparse.make_option('--details', action='store_true', default=False,
help='print detailed results for every test'),
optparse.make_option('--debug-rwt-logging', action='store_true', default=False,
help='print timestamps and debug information for run-webkit-tests itself'),
]
class Printer(object):
"""Class handling all non-debug-logging printing done by run-webkit-tests."""
def __init__(self, port, options, regular_output, logger=None):
self.num_completed = 0
self.num_tests = 0
self._port = port
self._options = options
self._meter = MeteredStream(regular_output, options.debug_rwt_logging, logger=logger,
number_of_columns=self._port.host.platform.terminal_width())
self._running_tests = []
self._completed_tests = []
def cleanup(self):
self._meter.cleanup()
def __del__(self):
self.cleanup()
def print_config(self, results_directory):
self._print_default("Using port '%s'" % self._port.name())
self._print_default("Test configuration: %s" % self._port.test_configuration())
self._print_default("View the test results at file://%s/results.html" % results_directory)
# FIXME: should these options be in printing_options?
if self._options.new_baseline:
self._print_default("Placing new baselines in %s" % self._port.baseline_path())
fs = self._port.host.filesystem
fallback_path = [fs.split(x)[1] for x in self._port.baseline_search_path()]
self._print_default("Baseline search path: %s -> generic" % " -> ".join(fallback_path))
self._print_default("Using %s build" % self._options.configuration)
if self._options.pixel_tests:
self._print_default("Pixel tests enabled")
else:
self._print_default("Pixel tests disabled")
self._print_default("Regular timeout: %s, slow test timeout: %s" %
(self._options.time_out_ms, self._options.slow_time_out_ms))
self._print_default('Command line: ' + ' '.join(self._port.driver_cmd_line()))
self._print_default('')
def print_found(self, num_all_test_files, num_to_run, repeat_each, iterations):
found_str = 'Found %s; running %d' % (grammar.pluralize('test', num_all_test_files), num_to_run)
if repeat_each * iterations > 1:
found_str += ' (%d times each: --repeat-each=%d --iterations=%d)' % (repeat_each * iterations, repeat_each, iterations)
found_str += ', skipping %d' % (num_all_test_files - num_to_run)
self._print_default(found_str + '.')
def print_expected(self, run_results, tests_with_result_type_callback):
self._print_expected_results_of_type(run_results, test_expectations.PASS, "passes", tests_with_result_type_callback)
self._print_expected_results_of_type(run_results, test_expectations.FAIL, "failures", tests_with_result_type_callback)
self._print_expected_results_of_type(run_results, test_expectations.FLAKY, "flaky", tests_with_result_type_callback)
self._print_debug('')
def print_workers_and_shards(self, num_workers, num_shards, num_locked_shards):
driver_name = self._port.driver_name()
if num_workers == 1:
self._print_default("Running 1 %s." % driver_name)
self._print_debug("(%s)." % grammar.pluralize('shard', num_shards))
else:
self._print_default("Running %d %ss in parallel." % (num_workers, driver_name))
self._print_debug("(%d shards; %d locked)." % (num_shards, num_locked_shards))
self._print_default('')
def _print_expected_results_of_type(self, run_results, result_type, result_type_str, tests_with_result_type_callback):
tests = tests_with_result_type_callback(result_type)
now = run_results.tests_by_timeline[test_expectations.NOW]
wontfix = run_results.tests_by_timeline[test_expectations.WONTFIX]
# We use a fancy format string in order to print the data out in a
# nicely-aligned table.
fmtstr = ("Expect: %%5d %%-8s (%%%dd now, %%%dd wontfix)"
% (self._num_digits(now), self._num_digits(wontfix)))
self._print_debug(fmtstr % (len(tests), result_type_str, len(tests & now), len(tests & wontfix)))
def _num_digits(self, num):
ndigits = 1
if len(num):
ndigits = int(math.log10(len(num))) + 1
return ndigits
def print_results(self, run_time, run_results, summarized_results):
self._print_timing_statistics(run_time, run_results)
self._print_one_line_summary(run_time, run_results)
def _print_timing_statistics(self, total_time, run_results):
self._print_debug("Test timing:")
self._print_debug(" %6.2f total testing time" % total_time)
self._print_debug("")
self._print_worker_statistics(run_results, int(self._options.child_processes))
self._print_aggregate_test_statistics(run_results)
self._print_individual_test_times(run_results)
self._print_directory_timings(run_results)
def _print_worker_statistics(self, run_results, num_workers):
self._print_debug("Thread timing:")
stats = {}
cuml_time = 0
for result in run_results.results_by_name.values():
stats.setdefault(result.worker_name, {'num_tests': 0, 'total_time': 0})
stats[result.worker_name]['num_tests'] += 1
stats[result.worker_name]['total_time'] += result.total_run_time
cuml_time += result.total_run_time
for worker_name in stats:
self._print_debug(" %10s: %5d tests, %6.2f secs" % (worker_name, stats[worker_name]['num_tests'], stats[worker_name]['total_time']))
self._print_debug(" %6.2f cumulative, %6.2f optimal" % (cuml_time, cuml_time / num_workers))
self._print_debug("")
def _print_aggregate_test_statistics(self, run_results):
times_for_dump_render_tree = [result.test_run_time for result in run_results.results_by_name.values()]
self._print_statistics_for_test_timings("PER TEST TIME IN TESTSHELL (seconds):", times_for_dump_render_tree)
def _print_individual_test_times(self, run_results):
# Reverse-sort by the time spent in the driver.
individual_test_timings = sorted(run_results.results_by_name.values(), key=lambda result: result.test_run_time, reverse=True)
num_printed = 0
slow_tests = []
timeout_or_crash_tests = []
unexpected_slow_tests = []
for test_tuple in individual_test_timings:
test_name = test_tuple.test_name
is_timeout_crash_or_slow = False
if test_name in run_results.slow_tests:
is_timeout_crash_or_slow = True
slow_tests.append(test_tuple)
if test_name in run_results.failures_by_name:
result = run_results.results_by_name[test_name].type
if (result == test_expectations.TIMEOUT or
result == test_expectations.CRASH):
is_timeout_crash_or_slow = True
timeout_or_crash_tests.append(test_tuple)
if (not is_timeout_crash_or_slow and num_printed < NUM_SLOW_TESTS_TO_LOG):
num_printed = num_printed + 1
unexpected_slow_tests.append(test_tuple)
self._print_debug("")
if unexpected_slow_tests:
self._print_test_list_timing("%s slowest tests that are not marked as SLOW and did not timeout/crash:" %
NUM_SLOW_TESTS_TO_LOG, unexpected_slow_tests)
self._print_debug("")
if slow_tests:
self._print_test_list_timing("Tests marked as SLOW:", slow_tests)
self._print_debug("")
if timeout_or_crash_tests:
self._print_test_list_timing("Tests that timed out or crashed:", timeout_or_crash_tests)
self._print_debug("")
def _print_test_list_timing(self, title, test_list):
self._print_debug(title)
for test_tuple in test_list:
test_run_time = round(test_tuple.test_run_time, 1)
self._print_debug(" %s took %s seconds" % (test_tuple.test_name, test_run_time))
def _print_directory_timings(self, run_results):
stats = {}
for result in run_results.results_by_name.values():
stats.setdefault(result.shard_name, {'num_tests': 0, 'total_time': 0})
stats[result.shard_name]['num_tests'] += 1
stats[result.shard_name]['total_time'] += result.total_run_time
min_seconds_to_print = 15
timings = []
for directory in stats:
rounded_time = round(stats[directory]['total_time'], 1)
if rounded_time > min_seconds_to_print:
timings.append((directory, rounded_time, stats[directory]['num_tests']))
if not timings:
return
timings.sort()
self._print_debug("Time to process slowest subdirectories:")
for timing in timings:
self._print_debug(" %s took %s seconds to run %s tests." % timing)
self._print_debug("")
def _print_statistics_for_test_timings(self, title, timings):
self._print_debug(title)
timings.sort()
num_tests = len(timings)
if not num_tests:
return
percentile90 = timings[int(.9 * num_tests)]
percentile99 = timings[int(.99 * num_tests)]
if num_tests % 2 == 1:
median = timings[((num_tests - 1) / 2) - 1]
else:
lower = timings[num_tests / 2 - 1]
upper = timings[num_tests / 2]
median = (float(lower + upper)) / 2
mean = sum(timings) / num_tests
        sum_of_deviations = 0
        for timing in timings:
            sum_of_deviations += math.pow(timing - mean, 2)
        std_deviation = math.sqrt(sum_of_deviations / num_tests)
self._print_debug(" Median: %6.3f" % median)
self._print_debug(" Mean: %6.3f" % mean)
self._print_debug(" 90th percentile: %6.3f" % percentile90)
self._print_debug(" 99th percentile: %6.3f" % percentile99)
self._print_debug(" Standard dev: %6.3f" % std_deviation)
self._print_debug("")
def _print_one_line_summary(self, total_time, run_results):
if self._options.timing:
parallel_time = sum(result.total_run_time for result in run_results.results_by_name.values())
# There is serial overhead in layout_test_runner.run() that we can't easily account for when
# really running in parallel, but taking the min() ensures that in the worst case
# (if parallel time is less than run_time) we do account for it.
serial_time = total_time - min(run_results.run_time, parallel_time)
speedup = (parallel_time + serial_time) / total_time
timing_summary = ' in %.2fs (%.2fs in rwt, %.2gx)' % (total_time, serial_time, speedup)
else:
timing_summary = ''
total = run_results.total - run_results.expected_skips
expected = run_results.expected - run_results.expected_skips
unexpected = run_results.unexpected
incomplete = total - expected - unexpected
incomplete_str = ''
if incomplete:
self._print_default("")
incomplete_str = " (%d didn't run)" % incomplete
if self._options.verbose or self._options.debug_rwt_logging or unexpected:
self.writeln("")
expected_summary_str = ''
if run_results.expected_failures > 0:
expected_summary_str = " (%d passed, %d didn't)" % (expected - run_results.expected_failures, run_results.expected_failures)
summary = ''
if unexpected == 0:
if expected == total:
if expected > 1:
summary = "All %d tests ran as expected%s%s." % (expected, expected_summary_str, timing_summary)
else:
summary = "The test ran as expected%s%s." % (expected_summary_str, timing_summary)
else:
summary = "%s ran as expected%s%s%s." % (grammar.pluralize('test', expected), expected_summary_str, incomplete_str, timing_summary)
else:
summary = "%s ran as expected%s, %d didn't%s%s:" % (grammar.pluralize('test', expected), expected_summary_str, unexpected, incomplete_str, timing_summary)
self._print_quiet(summary)
self._print_quiet("")
def _test_status_line(self, test_name, suffix):
format_string = '[%d/%d] %s%s'
status_line = format_string % (self.num_completed, self.num_tests, test_name, suffix)
if len(status_line) > self._meter.number_of_columns():
overflow_columns = len(status_line) - self._meter.number_of_columns()
ellipsis = '...'
if len(test_name) < overflow_columns + len(ellipsis) + 2:
# We don't have enough space even if we elide, just show the test filename.
fs = self._port.host.filesystem
test_name = fs.split(test_name)[1]
else:
new_length = len(test_name) - overflow_columns - len(ellipsis)
prefix = int(new_length / 2)
test_name = test_name[:prefix] + ellipsis + test_name[-(new_length - prefix):]
return format_string % (self.num_completed, self.num_tests, test_name, suffix)
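    # Illustrative sketch (hypothetical terminal width, not part of this module):
    # when a status line such as
    #   [12/100] fast/dom/some-extremely-long-test-name.html passed
    # is wider than the meter, the middle of the test name is replaced with
    # '...' (keeping a prefix and a suffix of the name), e.g.
    #   [12/100] fast/dom/some-ex...test-name.html passed
    # so that the line still fits within number_of_columns().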
def print_started_test(self, test_name):
self._running_tests.append(test_name)
if len(self._running_tests) > 1:
suffix = ' (+%d)' % (len(self._running_tests) - 1)
else:
suffix = ''
if self._options.verbose:
write = self._meter.write_update
else:
write = self._meter.write_throttled_update
write(self._test_status_line(test_name, suffix))
def print_finished_test(self, result, expected, exp_str, got_str):
self.num_completed += 1
test_name = result.test_name
result_message = self._result_message(result.type, result.failures, expected,
self._options.timing, result.test_run_time)
if self._options.details:
self._print_test_trace(result, exp_str, got_str)
elif self._options.verbose or not expected:
self.writeln(self._test_status_line(test_name, result_message))
elif self.num_completed == self.num_tests:
self._meter.write_update('')
else:
if test_name == self._running_tests[0]:
self._completed_tests.insert(0, [test_name, result_message])
else:
self._completed_tests.append([test_name, result_message])
for test_name, result_message in self._completed_tests:
self._meter.write_throttled_update(self._test_status_line(test_name, result_message))
self._completed_tests = []
self._running_tests.remove(test_name)
def _result_message(self, result_type, failures, expected, timing, test_run_time):
exp_string = ' unexpectedly' if not expected else ''
timing_string = ' %.4fs' % test_run_time if timing else ''
if result_type == test_expectations.PASS:
return ' passed%s%s' % (exp_string, timing_string)
else:
return ' failed%s (%s)%s' % (exp_string, ', '.join(failure.message() for failure in failures), timing_string)
def _print_test_trace(self, result, exp_str, got_str):
test_name = result.test_name
self._print_default(self._test_status_line(test_name, ''))
base = self._port.lookup_virtual_test_base(test_name)
if base:
args = ' '.join(self._port.lookup_virtual_test_args(test_name))
self._print_default(' base: %s' % base)
self._print_default(' args: %s' % args)
references = self._port.reference_files(test_name)
if references:
for _, filename in references:
self._print_default(' ref: %s' % self._port.relative_test_filename(filename))
else:
for extension in ('.txt', '.png', '.wav'):
self._print_baseline(test_name, extension)
self._print_default(' exp: %s' % exp_str)
self._print_default(' got: %s' % got_str)
self._print_default(' took: %-.3f' % result.test_run_time)
self._print_default('')
def _print_baseline(self, test_name, extension):
baseline = self._port.expected_filename(test_name, extension)
if self._port._filesystem.exists(baseline):
relpath = self._port.relative_test_filename(baseline)
else:
relpath = '<none>'
self._print_default(' %s: %s' % (extension[1:], relpath))
def _print_quiet(self, msg):
self.writeln(msg)
def _print_default(self, msg):
if not self._options.quiet:
self.writeln(msg)
def _print_debug(self, msg):
if self._options.debug_rwt_logging:
self.writeln(msg)
def write_throttled_update(self, msg):
self._meter.write_throttled_update(msg)
def write_update(self, msg):
self._meter.write_update(msg)
def writeln(self, msg):
self._meter.writeln(msg)
def flush(self):
self._meter.flush()
| {
"content_hash": "b5101c5077336048f6c6fca1ffcc6c41",
"timestamp": "",
"source": "github",
"line_count": 405,
"max_line_length": 166,
"avg_line_length": 45.590123456790124,
"alnum_prop": 0.5998700173310225,
"repo_name": "lordmos/blink",
"id": "598f638a81f7e2502668e5ff30ebe4b20b2c3739",
"size": "20000",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "Tools/Scripts/webkitpy/layout_tests/views/printing.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Assembly",
"bytes": "6433"
},
{
"name": "C",
"bytes": "753714"
},
{
"name": "C++",
"bytes": "40028043"
},
{
"name": "CSS",
"bytes": "539440"
},
{
"name": "F#",
"bytes": "8755"
},
{
"name": "Java",
"bytes": "18650"
},
{
"name": "JavaScript",
"bytes": "25700387"
},
{
"name": "Objective-C",
"bytes": "426711"
},
{
"name": "PHP",
"bytes": "141755"
},
{
"name": "Perl",
"bytes": "901523"
},
{
"name": "Python",
"bytes": "3748305"
},
{
"name": "Ruby",
"bytes": "141818"
},
{
"name": "Shell",
"bytes": "9635"
},
{
"name": "XSLT",
"bytes": "49328"
}
],
"symlink_target": ""
} |
from inliner.transformers.BaseFunctionHandler import BaseFunctionHandler
import ast
class SimpleFunctionHandler(BaseFunctionHandler):
def inline(self, node, func_to_inline):
        # It's a simple function: the body is a single statement, so we can
        # simply replace the call with the inlined function's body.
body = func_to_inline.body[0]
if isinstance(body, ast.Return):
body = body.value
return self.replace_params_with_objects(body, func_to_inline, node) | {
"content_hash": "d0961a624ecfda32be18da201441efa9",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 106,
"avg_line_length": 39.92307692307692,
"alnum_prop": 0.7148362235067437,
"repo_name": "orf/inliner",
"id": "38bdac91cc219562c29c1527c5cc9e47e150761a",
"size": "519",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "inliner/transformers/SimpleFunctionHandler.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "7435"
}
],
"symlink_target": ""
} |
from django.contrib.gis.gdal.base import GDALBase
from django.contrib.gis.gdal.error import GDALException, OGRIndexError
from django.contrib.gis.gdal.field import Field
from django.contrib.gis.gdal.geometries import OGRGeometry, OGRGeomType
from django.contrib.gis.gdal.prototypes import ds as capi, geom as geom_api
from django.utils.encoding import force_bytes, force_text
# For more information, see the OGR C API source code:
# http://www.gdal.org/ogr__api_8h.html
#
# The OGR_F_* routines are relevant here.
class Feature(GDALBase):
"""
    This class wraps an OGR Feature; it needs to be instantiated
    from a Layer object.
"""
destructor = capi.destroy_feature
def __init__(self, feat, layer):
"""
Initializes Feature from a pointer and its Layer object.
"""
if not feat:
raise GDALException('Cannot create OGR Feature, invalid pointer given.')
self.ptr = feat
self._layer = layer
def __getitem__(self, index):
"""
        Gets the Field object at the specified index, which may be either
        an integer or the Field's string label. Note that the Field object
        is not the field's _value_ -- use the `get` method to retrieve the
        value (e.g. an integer) instead of a Field instance.
"""
if isinstance(index, str):
i = self.index(index)
else:
if index < 0 or index > self.num_fields:
raise OGRIndexError('index out of range')
i = index
return Field(self, i)
def __iter__(self):
"Iterates over each field in the Feature."
for i in range(self.num_fields):
yield self[i]
def __len__(self):
"Returns the count of fields in this feature."
return self.num_fields
def __str__(self):
"The string name of the feature."
return 'Feature FID %d in Layer<%s>' % (self.fid, self.layer_name)
def __eq__(self, other):
"Does equivalence testing on the features."
return bool(capi.feature_equal(self.ptr, other._ptr))
# #### Feature Properties ####
@property
def encoding(self):
return self._layer._ds.encoding
@property
def fid(self):
"Returns the feature identifier."
return capi.get_fid(self.ptr)
@property
def layer_name(self):
"Returns the name of the layer for the feature."
name = capi.get_feat_name(self._layer._ldefn)
return force_text(name, self.encoding, strings_only=True)
@property
def num_fields(self):
"Returns the number of fields in the Feature."
return capi.get_feat_field_count(self.ptr)
@property
def fields(self):
"Returns a list of fields in the Feature."
return [capi.get_field_name(capi.get_field_defn(self._layer._ldefn, i))
for i in range(self.num_fields)]
@property
def geom(self):
"Returns the OGR Geometry for this Feature."
# Retrieving the geometry pointer for the feature.
geom_ptr = capi.get_feat_geom_ref(self.ptr)
return OGRGeometry(geom_api.clone_geom(geom_ptr))
@property
def geom_type(self):
"Returns the OGR Geometry Type for this Feture."
return OGRGeomType(capi.get_fd_geom_type(self._layer._ldefn))
# #### Feature Methods ####
def get(self, field):
"""
Returns the value of the field, instead of an instance of the Field
object. May take a string of the field name or a Field object as
parameters.
"""
field_name = getattr(field, 'name', field)
return self[field_name].value
def index(self, field_name):
"Returns the index of the given field name."
i = capi.get_field_index(self.ptr, force_bytes(field_name))
if i < 0:
raise OGRIndexError('invalid OFT field name given: "%s"' % field_name)
return i
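# Illustrative usage sketch (hypothetical data source path and field name, not
# part of this module):
#
#   from django.contrib.gis.gdal import DataSource
#
#   ds = DataSource('/path/to/cities.shp')
#   layer = ds[0]
#   for feat in layer:
#       print(feat.fid, feat.get('NAME'), feat.geom.geom_type)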
| {
"content_hash": "15ccdd6f0d2912774d9fef56e7942c58",
"timestamp": "",
"source": "github",
"line_count": 115,
"max_line_length": 84,
"avg_line_length": 34.417391304347824,
"alnum_prop": 0.6243052046488126,
"repo_name": "mattseymour/django",
"id": "47e8bb1ae31c9efc0e07e6cf3f534d328fe541c6",
"size": "3958",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "django/contrib/gis/gdal/feature.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "55935"
},
{
"name": "HTML",
"bytes": "182963"
},
{
"name": "JavaScript",
"bytes": "252645"
},
{
"name": "Makefile",
"bytes": "125"
},
{
"name": "Python",
"bytes": "11845544"
},
{
"name": "Shell",
"bytes": "809"
},
{
"name": "Smarty",
"bytes": "130"
}
],
"symlink_target": ""
} |
"""Module containing our file processor that tokenizes a file for checks."""
import contextlib
import io
import logging
import sys
import tokenize
import flake8
from flake8 import defaults
from flake8 import exceptions
from flake8 import utils
LOG = logging.getLogger(__name__)
PyCF_ONLY_AST = 1024
NEWLINE = frozenset([tokenize.NL, tokenize.NEWLINE])
# Work around Python < 2.6 behaviour, which does not generate NL after
# a comment which is on a line by itself.
COMMENT_WITH_NL = tokenize.generate_tokens(['#\n'].pop).send(None)[1] == '#\n'
SKIP_TOKENS = frozenset([tokenize.NL, tokenize.NEWLINE, tokenize.INDENT,
tokenize.DEDENT])
class FileProcessor(object):
"""Processes a file and holdes state.
This processes a file by generating tokens, logical and physical lines,
and AST trees. This also provides a way of passing state about the file
to checks expecting that state. Any public attribute on this object can
be requested by a plugin. The known public attributes are:
- :attr:`blank_before`
- :attr:`blank_lines`
- :attr:`checker_state`
- :attr:`indent_char`
- :attr:`indent_level`
- :attr:`line_number`
- :attr:`logical_line`
- :attr:`max_line_length`
- :attr:`multiline`
- :attr:`noqa`
- :attr:`previous_indent_level`
- :attr:`previous_logical`
- :attr:`tokens`
- :attr:`total_lines`
- :attr:`verbose`
"""
def __init__(self, filename, options, lines=None):
"""Initialice our file processor.
:param str filename:
Name of the file to process
"""
self.options = options
self.filename = filename
self.lines = lines
if lines is None:
self.lines = self.read_lines()
self.strip_utf_bom()
# Defaults for public attributes
#: Number of preceding blank lines
self.blank_before = 0
#: Number of blank lines
self.blank_lines = 0
#: Checker states for each plugin?
self._checker_states = {}
#: Current checker state
self.checker_state = None
#: User provided option for hang closing
self.hang_closing = options.hang_closing
#: Character used for indentation
self.indent_char = None
#: Current level of indentation
self.indent_level = 0
#: Line number in the file
self.line_number = 0
#: Current logical line
self.logical_line = ''
#: Maximum line length as configured by the user
self.max_line_length = options.max_line_length
#: Whether the current physical line is multiline
self.multiline = False
#: Whether or not we're observing NoQA
self.noqa = False
#: Previous level of indentation
self.previous_indent_level = 0
#: Previous logical line
self.previous_logical = ''
#: Current set of tokens
self.tokens = []
#: Total number of lines in the file
self.total_lines = len(self.lines)
#: Verbosity level of Flake8
self.verbose = options.verbose
#: Statistics dictionary
self.statistics = {
'logical lines': 0,
}
@contextlib.contextmanager
def inside_multiline(self, line_number):
"""Context-manager to toggle the multiline attribute."""
self.line_number = line_number
self.multiline = True
yield
self.multiline = False
def reset_blank_before(self):
"""Reset the blank_before attribute to zero."""
self.blank_before = 0
def delete_first_token(self):
"""Delete the first token in the list of tokens."""
del self.tokens[0]
def visited_new_blank_line(self):
"""Note that we visited a new blank line."""
self.blank_lines += 1
def update_state(self, mapping):
"""Update the indent level based on the logical line mapping."""
(start_row, start_col) = mapping[0][1]
start_line = self.lines[start_row - 1]
self.indent_level = expand_indent(start_line[:start_col])
if self.blank_before < self.blank_lines:
self.blank_before = self.blank_lines
def update_checker_state_for(self, plugin):
"""Update the checker_state attribute for the plugin."""
if 'checker_state' in plugin['parameters']:
self.checker_state = self._checker_states.setdefault(
plugin['name'], {}
)
def next_logical_line(self):
"""Record the previous logical line.
This also resets the tokens list and the blank_lines count.
"""
if self.logical_line:
self.previous_indent_level = self.indent_level
self.previous_logical = self.logical_line
self.blank_lines = 0
self.tokens = []
self.noqa = False
def build_logical_line_tokens(self):
"""Build the mapping, comments, and logical line lists."""
logical = []
comments = []
length = 0
previous_row = previous_column = mapping = None
for token_type, text, start, end, line in self.tokens:
if token_type in SKIP_TOKENS:
continue
if not mapping:
mapping = [(0, start)]
if token_type == tokenize.COMMENT:
comments.append(text)
continue
if token_type == tokenize.STRING:
text = mutate_string(text)
if previous_row:
(start_row, start_column) = start
if previous_row != start_row:
row_index = previous_row - 1
column_index = previous_column - 1
previous_text = self.lines[row_index][column_index]
if (previous_text == ',' or
(previous_text not in '{[(' and
text not in '}])')):
text = ' ' + text
elif previous_column != start_column:
text = line[previous_column:start_column] + text
logical.append(text)
length += len(text)
mapping.append((length, end))
(previous_row, previous_column) = end
return comments, logical, mapping
def build_ast(self):
"""Build an abstract syntax tree from the list of lines."""
return compile(''.join(self.lines), '', 'exec', PyCF_ONLY_AST)
def build_logical_line(self):
"""Build a logical line from the current tokens list."""
comments, logical, mapping_list = self.build_logical_line_tokens()
joined_comments = ''.join(comments)
self.logical_line = ''.join(logical)
if defaults.NOQA_INLINE_REGEXP.search(joined_comments):
self.noqa = True
self.statistics['logical lines'] += 1
return joined_comments, self.logical_line, mapping_list
def split_line(self, token):
"""Split a physical line's line based on new-lines.
This also auto-increments the line number for the caller.
"""
for line in token[1].split('\n')[:-1]:
yield line
self.line_number += 1
def keyword_arguments_for(self, parameters, arguments=None):
"""Generate the keyword arguments for a list of parameters."""
if arguments is None:
arguments = {}
for param, required in parameters.items():
if param in arguments:
continue
try:
arguments[param] = getattr(self, param)
except AttributeError as exc:
if required:
LOG.exception(exc)
raise
else:
LOG.warning('Plugin requested optional parameter "%s" '
'but this is not an available parameter.',
param)
return arguments
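    # Illustrative sketch (hypothetical plugin, not part of flake8): a check
    # declared as ``def check(logical_line, noqa): ...`` is described by a
    # parameters mapping such as {'logical_line': True, 'noqa': False}, and
    # keyword_arguments_for() fills each name from the attribute of the same
    # name on this processor, e.g. {'logical_line': self.logical_line, ...}.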
def check_physical_error(self, error_code, line):
"""Update attributes based on error code and line."""
if error_code == 'E101':
self.indent_char = line[0]
def generate_tokens(self):
"""Tokenize the file and yield the tokens.
:raises flake8.exceptions.InvalidSyntax:
If a :class:`tokenize.TokenError` is raised while generating
tokens.
"""
try:
for token in tokenize.generate_tokens(self.next_line):
if token[2][0] > self.total_lines:
break
self.tokens.append(token)
yield token
except (tokenize.TokenError, SyntaxError) as exc:
raise exceptions.InvalidSyntax(exception=exc)
def line_for(self, line_number):
"""Retrieve the physical line at the specified line number."""
adjusted_line_number = line_number - 1
# NOTE(sigmavirus24): Some plugins choose to report errors for empty
        # files on Line 1. In those cases, we shouldn't bother trying to
# retrieve a physical line (since none exist).
if 0 <= adjusted_line_number < len(self.lines):
return self.lines[adjusted_line_number]
return None
def next_line(self):
"""Get the next line from the list."""
if self.line_number >= self.total_lines:
return ''
line = self.lines[self.line_number]
self.line_number += 1
if self.indent_char is None and line[:1] in defaults.WHITESPACE:
self.indent_char = line[0]
return line
def read_lines(self):
# type: () -> List[str]
"""Read the lines for this file checker."""
if self.filename is None or self.filename == '-':
self.filename = self.options.stdin_display_name
lines = self.read_lines_from_stdin()
else:
lines = self.read_lines_from_filename()
return lines
def _readlines_py2(self):
# type: () -> List[str]
with open(self.filename, 'rU') as fd:
return fd.readlines()
def _readlines_py3(self):
# type: () -> List[str]
try:
with open(self.filename, 'rb') as fd:
(coding, lines) = tokenize.detect_encoding(fd.readline)
textfd = io.TextIOWrapper(fd, coding, line_buffering=True)
return ([l.decode(coding) for l in lines] +
textfd.readlines())
except (LookupError, SyntaxError, UnicodeError):
# If we can't detect the codec with tokenize.detect_encoding, or
# the detected encoding is incorrect, just fallback to latin-1.
with open(self.filename, encoding='latin-1') as fd:
return fd.readlines()
def read_lines_from_filename(self):
# type: () -> List[str]
"""Read the lines for a file."""
if (2, 6) <= sys.version_info < (3, 0):
readlines = self._readlines_py2
elif (3, 0) <= sys.version_info < (4, 0):
readlines = self._readlines_py3
return readlines()
def read_lines_from_stdin(self):
# type: () -> List[str]
"""Read the lines from standard in."""
return utils.stdin_get_value().splitlines(True)
def should_ignore_file(self):
# type: () -> bool
"""Check if ``# flake8: noqa`` is in the file to be ignored.
:returns:
True if a line matches :attr:`defaults.NOQA_FILE`,
otherwise False
:rtype:
bool
"""
ignore_file = defaults.NOQA_FILE.search
return any(ignore_file(line) for line in self.lines)
def strip_utf_bom(self):
# type: () -> NoneType
"""Strip the UTF bom from the lines of the file."""
if not self.lines:
# If we have nothing to analyze quit early
return
first_byte = ord(self.lines[0][0])
if first_byte not in (0xEF, 0xFEFF):
return
# If the first byte of the file is a UTF-8 BOM, strip it
if first_byte == 0xFEFF:
self.lines[0] = self.lines[0][1:]
elif self.lines[0][:3] == '\xEF\xBB\xBF':
self.lines[0] = self.lines[0][3:]
def is_eol_token(token):
"""Check if the token is an end-of-line token."""
return token[0] in NEWLINE or token[4][token[3][1]:].lstrip() == '\\\n'
if COMMENT_WITH_NL: # If on Python 2.6
def is_eol_token(token, _is_eol_token=is_eol_token):
"""Check if the token is an end-of-line token."""
return (_is_eol_token(token) or
(token[0] == tokenize.COMMENT and token[1] == token[4]))
def is_multiline_string(token):
"""Check if this is a multiline string."""
return token[0] == tokenize.STRING and '\n' in token[1]
def token_is_newline(token):
"""Check if the token type is a newline token type."""
return token[0] in NEWLINE
def token_is_comment(token):
"""Check if the token type is a comment."""
return COMMENT_WITH_NL and token[0] == tokenize.COMMENT
def count_parentheses(current_parentheses_count, token_text):
"""Count the number of parentheses."""
current_parentheses_count = current_parentheses_count or 0
if token_text in '([{':
return current_parentheses_count + 1
elif token_text in '}])':
return current_parentheses_count - 1
return current_parentheses_count
def log_token(log, token):
"""Log a token to a provided logging object."""
if token[2][0] == token[3][0]:
pos = '[%s:%s]' % (token[2][1] or '', token[3][1])
else:
pos = 'l.%s' % token[3][0]
log.log(flake8._EXTRA_VERBOSE, 'l.%s\t%s\t%s\t%r' %
(token[2][0], pos, tokenize.tok_name[token[0]],
token[1]))
# NOTE(sigmavirus24): This was taken wholesale from
# https://github.com/PyCQA/pycodestyle
def expand_indent(line):
r"""Return the amount of indentation.
Tabs are expanded to the next multiple of 8.
    >>> expand_indent('    ')
    4
    >>> expand_indent('\t')
    8
    >>> expand_indent('       \t')
    8
    >>> expand_indent('        \t')
    16
"""
if '\t' not in line:
return len(line) - len(line.lstrip())
result = 0
for char in line:
if char == '\t':
result = result // 8 * 8 + 8
elif char == ' ':
result += 1
else:
break
return result
# NOTE(sigmavirus24): This was taken wholesale from
# https://github.com/PyCQA/pycodestyle. The in-line comments were edited to be
# more descriptive.
def mutate_string(text):
"""Replace contents with 'xxx' to prevent syntax matching.
    >>> mutate_string('"abc"')
    '"xxx"'
    >>> mutate_string("'''abc'''")
    "'''xxx'''"
    >>> mutate_string("r'abc'")
    "r'xxx'"
"""
# NOTE(sigmavirus24): If there are string modifiers (e.g., b, u, r)
# use the last "character" to determine if we're using single or double
# quotes and then find the first instance of it
start = text.index(text[-1]) + 1
end = len(text) - 1
# Check for triple-quoted strings
if text[-3:] in ('"""', "'''"):
start += 2
end -= 2
return text[:start] + 'x' * (end - start) + text[end:]
| {
"content_hash": "83e484136a65f1263a102f881d8c6dd1",
"timestamp": "",
"source": "github",
"line_count": 438,
"max_line_length": 78,
"avg_line_length": 34.87899543378995,
"alnum_prop": 0.5729528048700661,
"repo_name": "Aorjoa/aiyara-ceph-dash",
"id": "44024e5aeebe7d050b3fd20d60652674a44a2ff8",
"size": "15277",
"binary": false,
"copies": "1",
"ref": "refs/heads/aiyara",
"path": ".tox/flake8/lib/python2.7/site-packages/flake8/processor.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "C",
"bytes": "5939"
},
{
"name": "CSS",
"bytes": "29951"
},
{
"name": "Groff",
"bytes": "17679"
},
{
"name": "HTML",
"bytes": "12219"
},
{
"name": "JavaScript",
"bytes": "218064"
},
{
"name": "Python",
"bytes": "6880049"
},
{
"name": "Shell",
"bytes": "6504"
}
],
"symlink_target": ""
} |
"""Central place for package metadata."""
from pkg_resources import DistributionNotFound, get_distribution
__name__ = "resolwe-runtime-utils"
__title__ = "Resolwe Runtime Utilities"
__summary__ = "Runtime utilities for Resolwe dataflow engine"
__url__ = "https://github.com/genialis/resolwe-runtime-utils"
__git_repo_url__ = __url__
try:
__version__ = get_distribution(__name__).version
except DistributionNotFound:
# Package is not (yet) installed.
pass
__author__ = "Genialis, Inc."
__email__ = "[email protected]"
__license__ = "Apache License (2.0)"
__copyright__ = "2015-2019, " + __author__
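# Illustrative note (assumption, not part of this module): when the package is
# not installed, ``__version__`` is never assigned, so callers that need it can
# read it defensively, e.g.
#
#   import __about__
#   version = getattr(__about__, '__version__', 'unknown')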
| {
"content_hash": "b950c51eedaff4b29551535f772be3fe",
"timestamp": "",
"source": "github",
"line_count": 21,
"max_line_length": 64,
"avg_line_length": 29.428571428571427,
"alnum_prop": 0.6747572815533981,
"repo_name": "genialis/resolwe-runtime-utils",
"id": "67c72bb8067bd28eff32b9f8b5f78d4ce1706941",
"size": "1197",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "__about__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "71550"
}
],
"symlink_target": ""
} |
import asyncio
from unittest import mock
from zope.interface import implementer
from aiorm import registry
from aiorm.interfaces import IDriver
class DummyCursor:
last_query = None
last_parameters = None
# set many query results in those vars
return_many = [None,]
return_one = [None]
@asyncio.coroutine
def execute(self, query, parameters=None):
DummyCursor.last_query = query
DummyCursor.last_parameters = parameters
@asyncio.coroutine
def fetchone(self):
return DummyCursor.return_one.pop(0)
@asyncio.coroutine
def fetchall(self):
return DummyCursor.return_many.pop(0)
def __enter__(self):
return self
def __exit__(self, type, value, traceback):
pass
@implementer(IDriver)
class DummyDriver:
last_url = None
connected = False
released = True
mock = DummyCursor()
@asyncio.coroutine
def connect(self, url):
""" create the driver and connect from the given url """
self.__class__.last_url = url
self.__class__.connected = True
self.database = url.rsplit('/', 2).pop()
@asyncio.coroutine
def disconnect(self):
self.__class__.connected = False
@asyncio.coroutine
def cursor(self, timeout=None):
return self.__class__.mock
@asyncio.coroutine
def acquire(self):
self.__class__.released = False
return self
@asyncio.coroutine
def release(self):
self.__class__.released = True
class DriverFixture:
@classmethod
def setUpClass(cls):
registry.register(DummyDriver)
@classmethod
def tearDownClass(cls):
registry.unregister(DummyDriver)
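# Illustrative sketch (hypothetical test case, not part of this module): a test
# can mix in DriverFixture so DummyDriver is registered, then seed the canned
# results on DummyCursor, e.g.
#
#   class MyRepositoryTest(DriverFixture, unittest.TestCase):
#
#       def test_fetch_one_row(self):
#           DummyCursor.return_one = [{'id': 1}]
#           # ... run the coroutine under test; its cursor.fetchone() call
#           # returns the canned row above, and DummyCursor.last_query records
#           # the SQL that was executed.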
| {
"content_hash": "6cc957c233d5030fac62fb04adf423b7",
"timestamp": "",
"source": "github",
"line_count": 76,
"max_line_length": 64,
"avg_line_length": 22.460526315789473,
"alnum_prop": 0.6438195664909198,
"repo_name": "mardiros/aiorm",
"id": "726803c213e7e94223a387055c524157855d2139",
"size": "1708",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "aiorm/tests/fixtures/driver.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "111683"
}
],
"symlink_target": ""
} |
"""
Use this module to contain static data. Static data is directly used by the
codebase and should never be changed without a commit, as opposed to data
stored in the database that can change at any moment and whose values are not
necessarily constant.
Note: DO NOT re-use enum values unless you know exactly what you are doing!
"""
import enum
# Enum for permissions available to users.
class Permissions(enum.IntEnum):
# Site admins: always has access to everything
ADMIN = 1
# Add, edit, or delete user data
USERS = 2
# Run the room hassle
HASSLE = 3
# Manage mailing lists
EMAIL = 4
# Run rotation meetings
ROTATION = 5
# Add, edit, or delete expenses
BUDGET = 6
# See birthday list
BIRTHDAYS = 7
# Enum for search modes.
class MemberSearchMode(enum.IntEnum):
# Everyone
ALL = 1
# Current members
CURRENT = 2
# Alumni
ALUMNI = 3
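# Illustrative usage sketch (hypothetical helper, not part of this module):
#
#   def can_run_hassle(user_permissions):
#       """user_permissions is assumed to be an iterable of Permissions values."""
#       return (Permissions.ADMIN in user_permissions
#           or Permissions.HASSLE in user_permissions)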
| {
"content_hash": "94241a6d6bf040c5732ba4057c73acf2",
"timestamp": "",
"source": "github",
"line_count": 36,
"max_line_length": 77,
"avg_line_length": 24.38888888888889,
"alnum_prop": 0.7198177676537585,
"repo_name": "RuddockHouse/RuddockWebsite",
"id": "8435c3b98ab4d7a523104a862ca79f118b8e3c15",
"size": "878",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ruddock/resources.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "7151"
},
{
"name": "HTML",
"bytes": "63834"
},
{
"name": "JavaScript",
"bytes": "7647"
},
{
"name": "Python",
"bytes": "150560"
}
],
"symlink_target": ""
} |
import ast
import math
import os.path
from oslo_config import cfg
from oslo_log import log as logging
from oslo_utils import units
import six
from cinder import exception
from cinder.i18n import _, _LE, _LI, _LW
from cinder.objects import fields
from cinder.volume.drivers.emc import emc_vmax_fast
from cinder.volume.drivers.emc import emc_vmax_https
from cinder.volume.drivers.emc import emc_vmax_masking
from cinder.volume.drivers.emc import emc_vmax_provision
from cinder.volume.drivers.emc import emc_vmax_provision_v3
from cinder.volume.drivers.emc import emc_vmax_utils
from cinder.volume import utils as volume_utils
LOG = logging.getLogger(__name__)
CONF = cfg.CONF
try:
import pywbem
pywbemAvailable = True
except ImportError:
pywbemAvailable = False
CINDER_EMC_CONFIG_FILE = '/etc/cinder/cinder_emc_config.xml'
CINDER_EMC_CONFIG_FILE_PREFIX = '/etc/cinder/cinder_emc_config_'
CINDER_EMC_CONFIG_FILE_POSTFIX = '.xml'
BACKENDNAME = 'volume_backend_name'
PREFIXBACKENDNAME = 'capabilities:volume_backend_name'
PORTGROUPNAME = 'portgroupname'
EMC_ROOT = 'root/emc'
POOL = 'storagetype:pool'
ARRAY = 'storagetype:array'
FASTPOLICY = 'storagetype:fastpolicy'
BACKENDNAME = 'volume_backend_name'
COMPOSITETYPE = 'storagetype:compositetype'
STRIPECOUNT = 'storagetype:stripecount'
MEMBERCOUNT = 'storagetype:membercount'
STRIPED = 'striped'
CONCATENATED = 'concatenated'
SMI_VERSION_8 = 800
# V3
SLO = 'storagetype:slo'
WORKLOAD = 'storagetype:workload'
INTERVAL = 'storagetype:interval'
RETRIES = 'storagetype:retries'
ISV3 = 'isV3'
TRUNCATE_5 = 5
TRUNCATE_8 = 8
SNAPVX = 7
DISSOLVE_SNAPVX = 9
CREATE_NEW_TARGET = 2
SNAPVX_REPLICATION_TYPE = 6
emc_opts = [
cfg.StrOpt('cinder_emc_config_file',
default=CINDER_EMC_CONFIG_FILE,
help='use this file for cinder emc plugin '
'config data'), ]
CONF.register_opts(emc_opts)
class EMCVMAXCommon(object):
"""Common class for SMI-S based EMC volume drivers.
This common class is for EMC volume drivers based on SMI-S.
It supports VNX and VMAX arrays.
"""
VERSION = "2.0.0"
stats = {'driver_version': '1.0',
'free_capacity_gb': 0,
'reserved_percentage': 0,
'storage_protocol': None,
'total_capacity_gb': 0,
'vendor_name': 'EMC',
'volume_backend_name': None}
pool_info = {'backend_name': None,
'config_file': None,
'arrays_info': {},
'max_over_subscription_ratio': None,
'reserved_percentage': None
}
def __init__(self, prtcl, version, configuration=None):
if not pywbemAvailable:
LOG.info(_LI(
"Module PyWBEM not installed. "
"Install PyWBEM using the python-pywbem package."))
self.protocol = prtcl
self.configuration = configuration
self.configuration.append_config_values(emc_opts)
self.conn = None
self.url = None
self.user = None
self.passwd = None
self.masking = emc_vmax_masking.EMCVMAXMasking(prtcl)
self.utils = emc_vmax_utils.EMCVMAXUtils(prtcl)
self.fast = emc_vmax_fast.EMCVMAXFast(prtcl)
self.provision = emc_vmax_provision.EMCVMAXProvision(prtcl)
self.provisionv3 = emc_vmax_provision_v3.EMCVMAXProvisionV3(prtcl)
self.version = version
self._gather_info()
def _gather_info(self):
"""Gather the relevant information for update_volume_stats."""
if hasattr(self.configuration, 'cinder_emc_config_file'):
self.pool_info['config_file'] = (
self.configuration.cinder_emc_config_file)
else:
self.pool_info['config_file'] = (
self.configuration.safe_get('cinder_emc_config_file'))
self.pool_info['backend_name'] = (
self.configuration.safe_get('volume_backend_name'))
self.pool_info['max_over_subscription_ratio'] = (
self.configuration.safe_get('max_over_subscription_ratio'))
self.pool_info['reserved_percentage'] = (
self.configuration.safe_get('reserved_percentage'))
LOG.debug(
"Updating volume stats on file %(emcConfigFileName)s on "
"backend %(backendName)s.",
{'emcConfigFileName': self.pool_info['config_file'],
'backendName': self.pool_info['backend_name']})
self.pool_info['arrays_info'] = (
self.utils.parse_file_to_get_array_map(
self.pool_info['config_file']))
def create_volume(self, volume):
"""Creates a EMC(VMAX) volume from a pre-existing storage pool.
For a concatenated compositeType:
        If the volume size is over 240GB then a composite is created
        (EMCNumberOfMembers > 1); otherwise it defaults to a non-composite.
For a striped compositeType:
The user must supply an extra spec to determine how many metas
will make up the striped volume. If the meta size is greater
than 240GB an error is returned to the user. Otherwise the
EMCNumberOfMembers is what the user specifies.
:param volume: volume Object
:returns: dict -- volumeDict - the volume dictionary
"""
volumeSize = int(self.utils.convert_gb_to_bits(volume['size']))
volumeName = volume['id']
extraSpecs = self._initial_setup(volume)
self.conn = self._get_ecom_connection()
if extraSpecs[ISV3]:
rc, volumeDict, storageSystemName = (
self._create_v3_volume(volume, volumeName, volumeSize,
extraSpecs))
else:
rc, volumeDict, storageSystemName = (
self._create_composite_volume(volume, volumeName, volumeSize,
extraSpecs))
# If volume is created as part of a consistency group.
if 'consistencygroup_id' in volume and volume['consistencygroup_id']:
cgName = self.utils.truncate_string(
volume['consistencygroup_id'], 8)
volumeInstance = self.utils.find_volume_instance(
self.conn, volumeDict, volumeName)
replicationService = (
self.utils.find_replication_service(self.conn,
storageSystemName))
cgInstanceName = (
self._find_consistency_group(replicationService, cgName))
self.provision.add_volume_to_cg(self.conn,
replicationService,
cgInstanceName,
volumeInstance.path,
cgName,
volumeName,
extraSpecs)
LOG.info(_LI("Leaving create_volume: %(volumeName)s "
"Return code: %(rc)lu "
"volume dict: %(name)s."),
{'volumeName': volumeName,
'rc': rc,
'name': volumeDict})
# Adding version information
volumeDict['version'] = self.version
return volumeDict
def create_volume_from_snapshot(self, volume, snapshot):
"""Creates a volume from a snapshot.
For VMAX, replace snapshot with clone.
:param volume: volume Object
:param snapshot: snapshot object
:returns: dict -- the cloned volume dictionary
:raises: VolumeBackendAPIException
"""
LOG.debug("Entering create_volume_from_snapshot.")
snapshot['host'] = volume['host']
extraSpecs = self._initial_setup(snapshot)
self.conn = self._get_ecom_connection()
snapshotInstance = self._find_lun(snapshot)
storageSystem = snapshotInstance['SystemName']
syncName = self.utils.find_sync_sv_by_target(
self.conn, storageSystem, snapshotInstance, extraSpecs, True)
if syncName is not None:
repservice = self.utils.find_replication_service(self.conn,
storageSystem)
if repservice is None:
exception_message = (_("Cannot find Replication Service to "
"create volume for snapshot %s.")
% snapshotInstance)
raise exception.VolumeBackendAPIException(
data=exception_message)
self.provision.delete_clone_relationship(
self.conn, repservice, syncName, extraSpecs)
snapshot['host'] = volume['host']
return self._create_cloned_volume(volume, snapshot, extraSpecs, False)
def create_cloned_volume(self, cloneVolume, sourceVolume):
"""Creates a clone of the specified volume.
:param cloneVolume: clone volume Object
:param sourceVolume: volume object
:returns: cloneVolumeDict -- the cloned volume dictionary
"""
extraSpecs = self._initial_setup(sourceVolume)
return self._create_cloned_volume(cloneVolume, sourceVolume,
extraSpecs, False)
def delete_volume(self, volume):
"""Deletes a EMC(VMAX) volume.
:param volume: volume Object
"""
LOG.info(_LI("Deleting Volume: %(volume)s"),
{'volume': volume['name']})
rc, volumeName = self._delete_volume(volume)
LOG.info(_LI("Leaving delete_volume: %(volumename)s Return code: "
"%(rc)lu."),
{'volumename': volumeName,
'rc': rc})
def create_snapshot(self, snapshot, volume):
"""Creates a snapshot.
For VMAX, replace snapshot with clone.
:param snapshot: snapshot object
:param volume: volume Object to create snapshot from
:returns: dict -- the cloned volume dictionary
"""
extraSpecs = self._initial_setup(volume)
return self._create_cloned_volume(snapshot, volume, extraSpecs, True)
def delete_snapshot(self, snapshot, volume):
"""Deletes a snapshot.
:param snapshot: snapshot object
:param volume: volume Object to create snapshot from
"""
LOG.info(_LI("Delete Snapshot: %(snapshotName)s."),
{'snapshotName': snapshot['name']})
snapshot['host'] = volume['host']
self._delete_snapshot(snapshot)
def _remove_members(self, controllerConfigService,
volumeInstance, connector, extraSpecs):
"""This method unmaps a volume from a host.
Removes volume from the Device Masking Group that belongs to
a Masking View.
        Check if fast policy is in the extra specs. If it isn't, we do
        not need to do anything for FAST.
Assume that isTieringPolicySupported is False unless the FAST
policy is in the extra specs and tiering is enabled on the array.
:param controllerConfigService: instance name of
ControllerConfigurationService
:param volumeInstance: volume Object
:param connector: the connector object
:param extraSpecs: extra specifications
:returns: storageGroupInstanceName
"""
volumeName = volumeInstance['ElementName']
LOG.debug("Detaching volume %s.", volumeName)
return self.masking.remove_and_reset_members(
self.conn, controllerConfigService, volumeInstance,
volumeName, extraSpecs, connector)
def _unmap_lun(self, volume, connector):
"""Unmaps a volume from the host.
:param volume: the volume Object
:param connector: the connector Object
:raises: VolumeBackendAPIException
"""
extraSpecs = self._initial_setup(volume)
volumename = volume['name']
LOG.info(_LI("Unmap volume: %(volume)s."),
{'volume': volumename})
device_info = self.find_device_number(volume, connector['host'])
if 'hostlunid' not in device_info:
LOG.info(_LI("Volume %s is not mapped. No volume to unmap."),
volumename)
return
vol_instance = self._find_lun(volume)
storage_system = vol_instance['SystemName']
configservice = self.utils.find_controller_configuration_service(
self.conn, storage_system)
if configservice is None:
exception_message = (_("Cannot find Controller Configuration "
"Service for storage system "
"%(storage_system)s.")
% {'storage_system': storage_system})
raise exception.VolumeBackendAPIException(data=exception_message)
self._remove_members(configservice, vol_instance, connector,
extraSpecs)
livemigrationrecord = self.utils.get_live_migration_record(volume,
False)
if livemigrationrecord:
live_maskingviewdict = livemigrationrecord[0]
live_connector = livemigrationrecord[1]
live_extraSpecs = livemigrationrecord[2]
self._attach_volume(
volume, live_connector, live_extraSpecs,
live_maskingviewdict, True)
self.utils.delete_live_migration_record(volume)
def initialize_connection(self, volume, connector):
"""Initializes the connection and returns device and connection info.
The volume may be already mapped, if this is so the deviceInfo tuple
is returned. If the volume is not already mapped then we need to
gather information to either 1. Create an new masking view or 2. Add
the volume to an existing storage group within an already existing
maskingview.
The naming convention is the following:
.. code-block:: none
initiatorGroupName = OS-<shortHostName>-<shortProtocol>-IG
e.g OS-myShortHost-I-IG
storageGroupName = OS-<shortHostName>-<poolName>-<shortProtocol>-SG
e.g OS-myShortHost-SATA_BRONZ1-I-SG
portGroupName = OS-<target>-PG The portGroupName will come from
the EMC configuration xml file.
These are precreated. If the portGroup does not
exist then an error will be returned to the user
maskingView = OS-<shortHostName>-<poolName>-<shortProtocol>-MV
e.g OS-myShortHost-SATA_BRONZ1-I-MV
:param volume: volume Object
:param connector: the connector Object
:returns: dict -- deviceInfoDict - device information dict
:raises: VolumeBackendAPIException
"""
portGroupName = None
extraSpecs = self._initial_setup(volume)
is_multipath = connector.get('multipath', False)
volumeName = volume['name']
LOG.info(_LI("Initialize connection: %(volume)s."),
{'volume': volumeName})
self.conn = self._get_ecom_connection()
deviceInfoDict = self._wrap_find_device_number(
volume, connector['host'])
maskingViewDict = self._populate_masking_dict(
volume, connector, extraSpecs)
if ('hostlunid' in deviceInfoDict and
deviceInfoDict['hostlunid'] is not None):
isSameHost = self._is_same_host(connector, deviceInfoDict)
if isSameHost:
# Device is already mapped to same host so we will leave
# the state as is.
deviceNumber = deviceInfoDict['hostlunid']
LOG.info(_LI("Volume %(volume)s is already mapped. "
"The device number is %(deviceNumber)s."),
{'volume': volumeName,
'deviceNumber': deviceNumber})
# Special case, we still need to get the iscsi ip address.
portGroupName = (
self._get_correct_port_group(
deviceInfoDict, maskingViewDict['storageSystemName']))
else:
deviceInfoDict, portGroupName = self._attach_volume(
volume, connector, extraSpecs, maskingViewDict, True)
else:
deviceInfoDict, portGroupName = (
self._attach_volume(
volume, connector, extraSpecs, maskingViewDict))
if self.protocol.lower() == 'iscsi':
deviceInfoDict['ip_and_iqn'] = (
self._find_ip_protocol_endpoints(
self.conn, deviceInfoDict['storagesystem'],
portGroupName))
deviceInfoDict['is_multipath'] = is_multipath
return deviceInfoDict
def _attach_volume(self, volume, connector, extraSpecs,
maskingViewDict, isLiveMigration=False):
"""Attach a volume to a host.
If live migration is being undertaken then the volume
remains attached to the source host.
:params volume: the volume object
:params connector: the connector object
:param extraSpecs: extra specifications
:param maskingViewDict: masking view information
:param isLiveMigration: boolean, can be None
:returns: dict -- deviceInfoDict
String -- port group name
:raises: VolumeBackendAPIException
"""
volumeName = volume['name']
maskingViewDict = self._populate_masking_dict(
volume, connector, extraSpecs)
if isLiveMigration:
maskingViewDict['isLiveMigration'] = True
self.utils.insert_live_migration_record(volume, maskingViewDict,
connector, extraSpecs)
else:
maskingViewDict['isLiveMigration'] = False
rollbackDict = self.masking.setup_masking_view(
self.conn, maskingViewDict, extraSpecs)
# Find host lun id again after the volume is exported to the host.
deviceInfoDict = self.find_device_number(volume, connector['host'])
if 'hostlunid' not in deviceInfoDict:
# Did not successfully attach to host,
# so a rollback for FAST is required.
LOG.error(_LE("Error Attaching volume %(vol)s."),
{'vol': volumeName})
if ((rollbackDict['fastPolicyName'] is not None) or
(rollbackDict['isV3'] is not None)):
(self.masking._check_if_rollback_action_for_masking_required(
self.conn, rollbackDict))
self.utils.delete_live_migration_record(volume)
exception_message = (_("Error Attaching volume %(vol)s.")
% {'vol': volumeName})
raise exception.VolumeBackendAPIException(
data=exception_message)
return deviceInfoDict, rollbackDict['pgGroupName']
def _is_same_host(self, connector, deviceInfoDict):
"""Check if the host is the same.
Check if the host to attach to is the same host
that is already attached. This is necessary for
live migration.
:params connector: the connector object
:params deviceInfoDict: the device information dictionary
:returns: boolean -- True if the host is the same, False otherwise.
"""
if 'host' in connector:
currentHost = connector['host']
if ('maskingview' in deviceInfoDict and
deviceInfoDict['maskingview'] is not None):
if currentHost in deviceInfoDict['maskingview']:
return True
return False
def _get_correct_port_group(self, deviceInfoDict, storageSystemName):
"""Get the portgroup name from the existing masking view.
:params deviceInfoDict: the device info dictionary
:params storageSystemName: storage system name
:returns: String port group name
"""
if ('controller' in deviceInfoDict and
deviceInfoDict['controller'] is not None):
maskingViewInstanceName = deviceInfoDict['controller']
try:
maskingViewInstance = (
self.conn.GetInstance(maskingViewInstanceName))
except Exception:
exception_message = (_("Unable to get the name of "
"the masking view."))
raise exception.VolumeBackendAPIException(
data=exception_message)
# Get the portgroup from masking view
portGroupInstanceName = (
self.masking._get_port_group_from_masking_view(
self.conn,
maskingViewInstance['ElementName'],
storageSystemName))
try:
portGroupInstance = (
self.conn.GetInstance(portGroupInstanceName))
portGroupName = (
portGroupInstance['ElementName'])
except Exception:
exception_message = (_("Unable to get the name of "
"the portgroup."))
raise exception.VolumeBackendAPIException(
data=exception_message)
else:
exception_message = (_("Cannot get the portgroup from "
"the masking view."))
raise exception.VolumeBackendAPIException(
data=exception_message)
return portGroupName
def check_ig_instance_name(self, initiatorGroupInstanceName):
"""Check if an initiator group instance is on the array.
:param initiatorGroupInstanceName: initiator group instance name
:returns: initiator group name, or None if deleted
"""
return self.utils.check_ig_instance_name(
self.conn, initiatorGroupInstanceName)
def terminate_connection(self, volume, connector):
"""Disallow connection from connector.
:params volume: the volume Object
:params connector: the connector Object
"""
volumename = volume['name']
LOG.info(_LI("Terminate connection: %(volume)s."),
{'volume': volumename})
self._unmap_lun(volume, connector)
def extend_volume(self, volume, newSize):
"""Extends an existing volume.
        Prerequisites:
        1. The volume must be composite e.g. StorageVolume.EMCIsComposite=True
        2. The volume can only be concatenated
           e.g. StorageExtent.IsConcatenated=True
:params volume: the volume Object
:params newSize: the new size to increase the volume to
:returns: dict -- modifiedVolumeDict - the extended volume Object
:raises: VolumeBackendAPIException
"""
originalVolumeSize = volume['size']
volumeName = volume['name']
extraSpecs = self._initial_setup(volume)
self.conn = self._get_ecom_connection()
volumeInstance = self._find_lun(volume)
if volumeInstance is None:
exceptionMessage = (_("Cannot find Volume: %(volumename)s. "
"Extend operation. Exiting....")
% {'volumename': volumeName})
LOG.error(exceptionMessage)
raise exception.VolumeBackendAPIException(data=exceptionMessage)
return self._extend_volume(
volumeInstance, volumeName, newSize, originalVolumeSize,
extraSpecs)
def _extend_volume(
self, volumeInstance, volumeName, newSize, originalVolumeSize,
extraSpecs):
"""Extends an existing volume.
:params volumeInstance: the volume Instance
:params volumeName: the volume name
:params newSize: the new size to increase the volume to
:params originalVolumeSize: the original size
:params extraSpecs: extra specifications
:returns: dict -- modifiedVolumeDict - the extended volume Object
:raises: VolumeBackendAPIException
"""
if int(originalVolumeSize) > int(newSize):
exceptionMessage = (_(
"Your original size: %(originalVolumeSize)s GB is greater "
"than: %(newSize)s GB. Only Extend is supported. Exiting...")
% {'originalVolumeSize': originalVolumeSize,
'newSize': newSize})
LOG.error(exceptionMessage)
raise exception.VolumeBackendAPIException(data=exceptionMessage)
additionalVolumeSize = six.text_type(
int(newSize) - int(originalVolumeSize))
additionalVolumeSize = self.utils.convert_gb_to_bits(
additionalVolumeSize)
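        # The GB delta is converted with convert_gb_to_bits because the
        # V2 composite extend below expects the size in those units.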
if extraSpecs[ISV3]:
rc, modifiedVolumeDict = self._extend_v3_volume(
volumeInstance, volumeName, newSize, extraSpecs)
else:
# This is V2.
rc, modifiedVolumeDict = self._extend_composite_volume(
volumeInstance, volumeName, newSize, additionalVolumeSize,
extraSpecs)
# Check the occupied space of the new extended volume.
extendedVolumeInstance = self.utils.find_volume_instance(
self.conn, modifiedVolumeDict, volumeName)
extendedVolumeSize = self.utils.get_volume_size(
self.conn, extendedVolumeInstance)
LOG.debug(
"The actual volume size of the extended volume: %(volumeName)s "
"is %(volumeSize)s.",
{'volumeName': volumeName,
'volumeSize': extendedVolumeSize})
        # If the requested size and the actual size don't
        # tally, throw an exception.
newSizeBits = self.utils.convert_gb_to_bits(newSize)
diffVolumeSize = self.utils.compare_size(
newSizeBits, extendedVolumeSize)
if diffVolumeSize != 0:
exceptionMessage = (_(
"The requested size : %(requestedSize)s is not the same as "
"resulting size: %(resultSize)s.")
% {'requestedSize': newSizeBits,
'resultSize': extendedVolumeSize})
LOG.error(exceptionMessage)
raise exception.VolumeBackendAPIException(data=exceptionMessage)
LOG.debug(
"Leaving extend_volume: %(volumeName)s. "
"Return code: %(rc)lu, "
"volume dict: %(name)s.",
{'volumeName': volumeName,
'rc': rc,
'name': modifiedVolumeDict})
return modifiedVolumeDict
def update_volume_stats(self):
"""Retrieve stats info."""
pools = []
backendName = self.pool_info['backend_name']
max_oversubscription_ratio = (
self.pool_info['max_over_subscription_ratio'])
reservedPercentage = self.pool_info['reserved_percentage']
array_max_over_subscription = None
array_reserve_percent = None
for arrayInfo in self.pool_info['arrays_info']:
self._set_ecom_credentials(arrayInfo)
# Check what type of array it is
isV3 = self.utils.isArrayV3(self.conn, arrayInfo['SerialNumber'])
if isV3:
(location_info, total_capacity_gb, free_capacity_gb,
provisioned_capacity_gb,
array_reserve_percent) = self._update_srp_stats(arrayInfo)
poolName = ("%(slo)s+%(poolName)s+%(array)s"
% {'slo': arrayInfo['SLO'],
'poolName': arrayInfo['PoolName'],
'array': arrayInfo['SerialNumber']})
else:
# This is V2
(location_info, total_capacity_gb, free_capacity_gb,
provisioned_capacity_gb, array_max_over_subscription) = (
self._update_pool_stats(backendName, arrayInfo))
poolName = ("%(poolName)s+%(array)s"
% {'poolName': arrayInfo['PoolName'],
'array': arrayInfo['SerialNumber']})
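            # The reported pool name encodes the array (and, for V3, the
            # SLO) so retype/migration can locate the target later, e.g.
            # "Diamond+SRP_1+000197800123" (hypothetical values).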
pool = {'pool_name': poolName,
'total_capacity_gb': total_capacity_gb,
'free_capacity_gb': free_capacity_gb,
'provisioned_capacity_gb': provisioned_capacity_gb,
'QoS_support': False,
'location_info': location_info,
'consistencygroup_support': True,
'thin_provisioning_support': True,
'thick_provisioning_support': False,
'max_over_subscription_ratio': max_oversubscription_ratio
}
if array_max_over_subscription:
pool['max_over_subscription_ratio'] = (
self.utils.override_ratio(
max_oversubscription_ratio,
array_max_over_subscription))
if array_reserve_percent and (
array_reserve_percent > reservedPercentage):
pool['reserved_percentage'] = array_reserve_percent
else:
pool['reserved_percentage'] = reservedPercentage
pools.append(pool)
data = {'vendor_name': "EMC",
'driver_version': self.version,
'storage_protocol': 'unknown',
'volume_backend_name': self.pool_info['backend_name'] or
self.__class__.__name__,
# Use zero capacities here so we always use a pool.
'total_capacity_gb': 0,
'free_capacity_gb': 0,
'provisioned_capacity_gb': 0,
'reserved_percentage': 0,
'pools': pools}
return data
def _update_srp_stats(self, arrayInfo):
"""Update SRP stats.
:param arrayInfo: array information
:returns: location_info
:returns: totalManagedSpaceGbs
:returns: remainingManagedSpaceGbs
:returns: provisionedManagedSpaceGbs
:returns: array_reserve_percent
"""
(totalManagedSpaceGbs, remainingManagedSpaceGbs,
provisionedManagedSpaceGbs, array_reserve_percent) = (
self.provisionv3.get_srp_pool_stats(self.conn, arrayInfo))
LOG.info(_LI(
"Capacity stats for SRP pool %(poolName)s on array "
"%(arrayName)s total_capacity_gb=%(total_capacity_gb)lu, "
"free_capacity_gb=%(free_capacity_gb)lu, "
"provisioned_capacity_gb=%(provisioned_capacity_gb)lu"),
{'poolName': arrayInfo['PoolName'],
'arrayName': arrayInfo['SerialNumber'],
'total_capacity_gb': totalManagedSpaceGbs,
'free_capacity_gb': remainingManagedSpaceGbs,
'provisioned_capacity_gb': provisionedManagedSpaceGbs})
location_info = ("%(arrayName)s#%(poolName)s#%(slo)s#%(workload)s"
% {'arrayName': arrayInfo['SerialNumber'],
'poolName': arrayInfo['PoolName'],
'slo': arrayInfo['SLO'],
'workload': arrayInfo['Workload']})
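        # Example (hypothetical values): location_info could look like
        # "000197800123#SRP_1#Diamond#OLTP"; the retype path later splits
        # it on '#' to recover array, pool, SLO and workload.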
return (location_info, totalManagedSpaceGbs,
remainingManagedSpaceGbs, provisionedManagedSpaceGbs,
array_reserve_percent)
def retype(self, ctxt, volume, new_type, diff, host):
"""Migrate volume to another host using retype.
:param ctxt: context
:param volume: the volume object including the volume_type_id
:param new_type: the new volume type.
:param diff: Unused parameter.
:param host: The host dict holding the relevant target(destination)
information
:returns: boolean -- True if retype succeeded, False if error
"""
volumeName = volume['name']
volumeStatus = volume['status']
LOG.info(_LI("Migrating using retype Volume: %(volume)s."),
{'volume': volumeName})
extraSpecs = self._initial_setup(volume)
self.conn = self._get_ecom_connection()
volumeInstance = self._find_lun(volume)
if volumeInstance is None:
LOG.error(_LE("Volume %(name)s not found on the array. "
"No volume to migrate using retype."),
{'name': volumeName})
return False
if extraSpecs[ISV3]:
return self._slo_workload_migration(volumeInstance, volume, host,
volumeName, volumeStatus,
new_type, extraSpecs)
else:
return self._pool_migration(volumeInstance, volume, host,
volumeName, volumeStatus,
extraSpecs[FASTPOLICY],
new_type, extraSpecs)
def migrate_volume(self, ctxt, volume, host, new_type=None):
"""Migrate volume to another host.
:param ctxt: context
:param volume: the volume object including the volume_type_id
:param host: the host dict holding the relevant target(destination)
information
:param new_type: None
:returns: boolean -- Always returns True
:returns: dict -- Empty dict {}
"""
LOG.warning(_LW("The VMAX plugin only supports Retype. "
"If a pool based migration is necessary "
"this will happen on a Retype "
"From the command line: "
"cinder --os-volume-api-version 2 retype <volumeId> "
"<volumeType> --migration-policy on-demand"))
return True, {}
def _migrate_volume(
self, volume, volumeInstance, targetPoolName,
targetFastPolicyName, sourceFastPolicyName, extraSpecs,
new_type=None):
"""Migrate volume to another host.
:param volume: the volume object including the volume_type_id
:param volumeInstance: the volume instance
:param targetPoolName: the target poolName
:param targetFastPolicyName: the target FAST policy name, can be None
:param sourceFastPolicyName: the source FAST policy name, can be None
:param extraSpecs: extra specifications
:param new_type: None
:returns: boolean -- True/False
:returns: list -- empty list
"""
volumeName = volume['name']
storageSystemName = volumeInstance['SystemName']
sourcePoolInstanceName = self.utils.get_assoc_pool_from_volume(
self.conn, volumeInstance.path)
moved, rc = self._migrate_volume_from(
volume, volumeInstance, targetPoolName, sourceFastPolicyName,
extraSpecs)
if moved is False and sourceFastPolicyName is not None:
# Return the volume to the default source fast policy storage
# group because the migrate was unsuccessful.
LOG.warning(_LW(
"Failed to migrate: %(volumeName)s from "
"default source storage group "
"for FAST policy: %(sourceFastPolicyName)s. "
"Attempting cleanup... "),
{'volumeName': volumeName,
'sourceFastPolicyName': sourceFastPolicyName})
if sourcePoolInstanceName == self.utils.get_assoc_pool_from_volume(
self.conn, volumeInstance.path):
self._migrate_cleanup(self.conn, volumeInstance,
storageSystemName, sourceFastPolicyName,
volumeName, extraSpecs)
else:
# Migrate was successful but still issues.
self._migrate_rollback(
self.conn, volumeInstance, storageSystemName,
sourceFastPolicyName, volumeName, sourcePoolInstanceName,
extraSpecs)
return moved
if targetFastPolicyName == 'None':
targetFastPolicyName = None
if moved is True and targetFastPolicyName is not None:
if not self._migrate_volume_fast_target(
volumeInstance, storageSystemName,
targetFastPolicyName, volumeName, extraSpecs):
LOG.warning(_LW(
"Attempting a rollback of: %(volumeName)s to "
"original pool %(sourcePoolInstanceName)s."),
{'volumeName': volumeName,
'sourcePoolInstanceName': sourcePoolInstanceName})
self._migrate_rollback(
self.conn, volumeInstance, storageSystemName,
sourceFastPolicyName, volumeName, sourcePoolInstanceName,
extraSpecs)
if rc == 0:
moved = True
return moved
def _migrate_rollback(self, conn, volumeInstance,
storageSystemName, sourceFastPolicyName,
volumeName, sourcePoolInstanceName, extraSpecs):
"""Full rollback.
        Rollback is needed after failing on the final step of adding the
        migrated volume to the new default storage group for the target
        FAST policy.
:param conn: connection info to ECOM
:param volumeInstance: the volume instance
:param storageSystemName: the storage system name
:param sourceFastPolicyName: the source FAST policy name
:param volumeName: the volume Name
:param sourcePoolInstanceName: the instance name of the source pool
:param extraSpecs: extra specifications
"""
LOG.warning(_LW("_migrate_rollback on : %(volumeName)s."),
{'volumeName': volumeName})
storageRelocationService = self.utils.find_storage_relocation_service(
conn, storageSystemName)
try:
self.provision.migrate_volume_to_storage_pool(
conn, storageRelocationService, volumeInstance.path,
sourcePoolInstanceName, extraSpecs)
except Exception:
LOG.error(_LE(
"Failed to return volume %(volumeName)s to "
"original storage pool. Please contact your system "
"administrator to return it to the correct location."),
{'volumeName': volumeName})
if sourceFastPolicyName is not None:
self.add_to_default_SG(
conn, volumeInstance, storageSystemName, sourceFastPolicyName,
volumeName, extraSpecs)
def _migrate_cleanup(self, conn, volumeInstance,
storageSystemName, sourceFastPolicyName,
volumeName, extraSpecs):
"""If the migrate fails, put volume back to source FAST SG.
:param conn: connection info to ECOM
:param volumeInstance: the volume instance
:param storageSystemName: the storage system name
:param sourceFastPolicyName: the source FAST policy name
:param volumeName: the volume Name
:param extraSpecs: extra specifications
:returns: boolean -- True/False
"""
LOG.warning(_LW("_migrate_cleanup on : %(volumeName)s."),
{'volumeName': volumeName})
return_to_default = True
controllerConfigurationService = (
self.utils.find_controller_configuration_service(
conn, storageSystemName))
# Check to see what SG it is in.
assocStorageGroupInstanceNames = (
self.utils.get_storage_groups_from_volume(conn,
volumeInstance.path))
# This is the SG it should be in.
defaultStorageGroupInstanceName = (
self.fast.get_policy_default_storage_group(
conn, controllerConfigurationService, sourceFastPolicyName))
for assocStorageGroupInstanceName in assocStorageGroupInstanceNames:
# It is in the incorrect storage group.
if (assocStorageGroupInstanceName !=
defaultStorageGroupInstanceName):
self.provision.remove_device_from_storage_group(
conn, controllerConfigurationService,
assocStorageGroupInstanceName,
volumeInstance.path, volumeName, extraSpecs)
else:
# The volume is already in the default.
return_to_default = False
if return_to_default:
self.add_to_default_SG(
conn, volumeInstance, storageSystemName, sourceFastPolicyName,
volumeName, extraSpecs)
return return_to_default
def _migrate_volume_fast_target(
self, volumeInstance, storageSystemName,
targetFastPolicyName, volumeName, extraSpecs):
"""If the target host is FAST enabled.
If the target host is FAST enabled then we need to add it to the
default storage group for that policy.
:param volumeInstance: the volume instance
:param storageSystemName: the storage system name
:param targetFastPolicyName: the target fast policy name
:param volumeName: the volume name
:param extraSpecs: extra specifications
:returns: boolean -- True/False
"""
falseRet = False
LOG.info(_LI(
"Adding volume: %(volumeName)s to default storage group "
"for FAST policy: %(fastPolicyName)s."),
{'volumeName': volumeName,
'fastPolicyName': targetFastPolicyName})
controllerConfigurationService = (
self.utils.find_controller_configuration_service(
self.conn, storageSystemName))
defaultStorageGroupInstanceName = (
self.fast.get_or_create_default_storage_group(
self.conn, controllerConfigurationService,
targetFastPolicyName, volumeInstance, extraSpecs))
if defaultStorageGroupInstanceName is None:
LOG.error(_LE(
"Unable to create or get default storage group for FAST policy"
": %(fastPolicyName)s."),
{'fastPolicyName': targetFastPolicyName})
return falseRet
defaultStorageGroupInstanceName = (
self.fast.add_volume_to_default_storage_group_for_fast_policy(
self.conn, controllerConfigurationService, volumeInstance,
volumeName, targetFastPolicyName, extraSpecs))
if defaultStorageGroupInstanceName is None:
LOG.error(_LE(
"Failed to verify that volume was added to storage group for "
"FAST policy: %(fastPolicyName)s."),
{'fastPolicyName': targetFastPolicyName})
return falseRet
return True
def _migrate_volume_from(self, volume, volumeInstance,
targetPoolName, sourceFastPolicyName,
extraSpecs):
"""Check FAST policies and migrate from source pool.
:param volume: the volume object including the volume_type_id
:param volumeInstance: the volume instance
:param targetPoolName: the target poolName
:param sourceFastPolicyName: the source FAST policy name, can be None
:param extraSpecs: extra specifications
:returns: boolean -- True/False
:returns: int -- the return code from migrate operation
"""
falseRet = (False, -1)
volumeName = volume['name']
storageSystemName = volumeInstance['SystemName']
LOG.debug("sourceFastPolicyName is : %(sourceFastPolicyName)s.",
{'sourceFastPolicyName': sourceFastPolicyName})
# If the source volume is FAST enabled it must first be removed
# from the default storage group for that policy.
if sourceFastPolicyName is not None:
self.remove_from_default_SG(
self.conn, volumeInstance, storageSystemName,
sourceFastPolicyName, volumeName, extraSpecs)
# Migrate from one pool to another.
storageRelocationService = self.utils.find_storage_relocation_service(
self.conn, storageSystemName)
targetPoolInstanceName = self.utils.get_pool_by_name(
self.conn, targetPoolName, storageSystemName)
if targetPoolInstanceName is None:
LOG.error(_LE(
"Error finding target pool instance name for pool: "
"%(targetPoolName)s."),
{'targetPoolName': targetPoolName})
return falseRet
try:
rc = self.provision.migrate_volume_to_storage_pool(
self.conn, storageRelocationService, volumeInstance.path,
targetPoolInstanceName, extraSpecs)
except Exception:
            # The migration to the target pool failed; log the error
            # and report the failure to the caller.
LOG.exception(_LE(
"Error migrating volume: %(volumename)s. "
"to target pool %(targetPoolName)s."),
{'volumename': volumeName,
'targetPoolName': targetPoolName})
return falseRet
        # Check that the volume is now migrated to the correct storage
        # pool; if it is, terminate the migrate session.
foundPoolInstanceName = self.utils.get_assoc_pool_from_volume(
self.conn, volumeInstance.path)
if (foundPoolInstanceName is None or
(foundPoolInstanceName['InstanceID'] !=
targetPoolInstanceName['InstanceID'])):
LOG.error(_LE(
"Volume : %(volumeName)s. was not successfully migrated to "
"target pool %(targetPoolName)s."),
{'volumeName': volumeName,
'targetPoolName': targetPoolName})
return falseRet
else:
LOG.debug("Terminating migration session on: %(volumeName)s.",
{'volumeName': volumeName})
self.provision._terminate_migrate_session(
self.conn, volumeInstance.path, extraSpecs)
        moved = (rc == 0)
        return moved, rc
def remove_from_default_SG(
self, conn, volumeInstance, storageSystemName,
sourceFastPolicyName, volumeName, extraSpecs):
"""For FAST, remove volume from default storage group.
:param conn: connection info to ECOM
:param volumeInstance: the volume instance
:param storageSystemName: the storage system name
:param sourceFastPolicyName: the source FAST policy name
:param volumeName: the volume Name
:param extraSpecs: extra specifications
:raises: VolumeBackendAPIException
"""
controllerConfigurationService = (
self.utils.find_controller_configuration_service(
conn, storageSystemName))
try:
defaultStorageGroupInstanceName = (
self.masking.remove_device_from_default_storage_group(
conn, controllerConfigurationService,
volumeInstance.path, volumeName, sourceFastPolicyName,
extraSpecs))
except Exception:
exceptionMessage = (_(
"Failed to remove: %(volumename)s. "
"from the default storage group for "
"FAST policy %(fastPolicyName)s.")
% {'volumename': volumeName,
'fastPolicyName': sourceFastPolicyName})
LOG.exception(exceptionMessage)
raise exception.VolumeBackendAPIException(data=exceptionMessage)
if defaultStorageGroupInstanceName is None:
LOG.warning(_LW(
"The volume: %(volumename)s "
"was not first part of the default storage "
"group for FAST policy %(fastPolicyName)s."),
{'volumename': volumeName,
'fastPolicyName': sourceFastPolicyName})
def add_to_default_SG(
self, conn, volumeInstance, storageSystemName,
targetFastPolicyName, volumeName, extraSpecs):
"""For FAST, add volume to default storage group.
:param conn: connection info to ECOM
:param volumeInstance: the volume instance
:param storageSystemName: the storage system name
:param targetFastPolicyName: the target FAST policy name
:param volumeName: the volume Name
:param extraSpecs: extra specifications
"""
controllerConfigurationService = (
self.utils.find_controller_configuration_service(
conn, storageSystemName))
assocDefaultStorageGroupName = (
self.fast
.add_volume_to_default_storage_group_for_fast_policy(
conn, controllerConfigurationService, volumeInstance,
volumeName, targetFastPolicyName, extraSpecs))
if assocDefaultStorageGroupName is None:
LOG.error(_LE(
"Failed to add %(volumeName)s "
"to default storage group for fast policy "
"%(fastPolicyName)s."),
{'volumeName': volumeName,
'fastPolicyName': targetFastPolicyName})
def _is_valid_for_storage_assisted_migration_v3(
self, volumeInstanceName, host, sourceArraySerialNumber,
sourcePoolName, volumeName, volumeStatus, sgName):
"""Check if volume is suitable for storage assisted (pool) migration.
:param volumeInstanceName: the volume instance id
:param host: the host object
:param sourceArraySerialNumber: the array serial number of
the original volume
:param sourcePoolName: the pool name of the original volume
:param volumeName: the name of the volume to be migrated
:param volumeStatus: the status of the volume
:param sgName: storage group name
:returns: boolean -- True/False
:returns: string -- targetSlo
:returns: string -- targetWorkload
"""
falseRet = (False, None, None)
if 'location_info' not in host['capabilities']:
LOG.error(_LE('Error getting array, pool, SLO and workload.'))
return falseRet
info = host['capabilities']['location_info']
LOG.debug("Location info is : %(info)s.",
{'info': info})
        try:
            infoDetail = info.split('#')
            targetArraySerialNumber = infoDetail[0]
            targetPoolName = infoDetail[1]
            targetSlo = infoDetail[2]
            targetWorkload = infoDetail[3]
        except IndexError:
            LOG.error(_LE("Error parsing array, pool, SLO and workload."))
            return falseRet
if targetArraySerialNumber not in sourceArraySerialNumber:
LOG.error(_LE(
"The source array : %(sourceArraySerialNumber)s does not "
"match the target array: %(targetArraySerialNumber)s "
"skipping storage-assisted migration."),
{'sourceArraySerialNumber': sourceArraySerialNumber,
'targetArraySerialNumber': targetArraySerialNumber})
return falseRet
if targetPoolName not in sourcePoolName:
LOG.error(_LE(
"Only SLO/workload migration within the same SRP Pool "
"is supported in this version "
"The source pool : %(sourcePoolName)s does not "
"match the target array: %(targetPoolName)s. "
"Skipping storage-assisted migration."),
{'sourcePoolName': sourcePoolName,
'targetPoolName': targetPoolName})
return falseRet
foundStorageGroupInstanceName = (
self.utils.get_storage_group_from_volume(
self.conn, volumeInstanceName, sgName))
if foundStorageGroupInstanceName is None:
LOG.warning(_LW(
"Volume: %(volumeName)s is not currently "
"belonging to any storage group."),
{'volumeName': volumeName})
else:
storageGroupInstance = self.conn.GetInstance(
foundStorageGroupInstanceName)
emcFastSetting = self.utils._get_fast_settings_from_storage_group(
storageGroupInstance)
targetCombination = ("%(targetSlo)s+%(targetWorkload)s"
% {'targetSlo': targetSlo,
'targetWorkload': targetWorkload})
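            # Example (hypothetical): SLO "Diamond" and workload "OLTP"
            # give "Diamond+OLTP", which is checked against the FAST
            # setting of the volume's current storage group.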
if targetCombination in emcFastSetting:
LOG.error(_LE(
"No action required. Volume: %(volumeName)s is "
"already part of slo/workload combination: "
"%(targetCombination)s."),
{'volumeName': volumeName,
'targetCombination': targetCombination})
return falseRet
return (True, targetSlo, targetWorkload)
def _is_valid_for_storage_assisted_migration(
self, volumeInstanceName, host, sourceArraySerialNumber,
volumeName, volumeStatus):
"""Check if volume is suitable for storage assisted (pool) migration.
:param volumeInstanceName: the volume instance id
:param host: the host object
:param sourceArraySerialNumber: the array serial number of
the original volume
:param volumeName: the name of the volume to be migrated
        :param volumeStatus: the status of the volume
:returns: boolean -- True/False
:returns: string -- targetPool
:returns: string -- targetFastPolicy
"""
falseRet = (False, None, None)
if 'location_info' not in host['capabilities']:
LOG.error(_LE("Error getting target pool name and array."))
return falseRet
info = host['capabilities']['location_info']
LOG.debug("Location info is : %(info)s.",
{'info': info})
        try:
            infoDetail = info.split('#')
            targetArraySerialNumber = infoDetail[0]
            targetPoolName = infoDetail[1]
            targetFastPolicy = infoDetail[2]
        except IndexError:
            LOG.error(_LE(
                "Error parsing target pool name, array, and fast policy."))
            return falseRet
if targetArraySerialNumber not in sourceArraySerialNumber:
LOG.error(_LE(
"The source array : %(sourceArraySerialNumber)s does not "
"match the target array: %(targetArraySerialNumber)s, "
"skipping storage-assisted migration."),
{'sourceArraySerialNumber': sourceArraySerialNumber,
'targetArraySerialNumber': targetArraySerialNumber})
return falseRet
# Get the pool from the source array and check that is different
# to the pool in the target array.
assocPoolInstanceName = self.utils.get_assoc_pool_from_volume(
self.conn, volumeInstanceName)
assocPoolInstance = self.conn.GetInstance(
assocPoolInstanceName)
if assocPoolInstance['ElementName'] == targetPoolName:
LOG.error(_LE(
"No action required. Volume: %(volumeName)s is "
"already part of pool: %(pool)s."),
{'volumeName': volumeName,
'pool': targetPoolName})
return falseRet
LOG.info(_LI("Volume status is: %s."), volumeStatus)
if (host['capabilities']['storage_protocol'] != self.protocol and
(volumeStatus != 'available' and volumeStatus != 'retyping')):
LOG.error(_LE(
"Only available volumes can be migrated between "
"different protocols."))
return falseRet
return (True, targetPoolName, targetFastPolicy)
def _set_config_file_and_get_extra_specs(self, volume, volumeTypeId=None):
"""Given the volume object get the associated volumetype.
Given the volume object get the associated volumetype and the
extra specs associated with it.
        Based on the name of the config group, register the config file.
:param volume: the volume object including the volume_type_id
:param volumeTypeId: Optional override of volume['volume_type_id']
:returns: dict -- the extra specs dict
:returns: string -- configuration file
"""
extraSpecs = self.utils.get_volumetype_extraspecs(volume, volumeTypeId)
qosSpecs = self.utils.get_volumetype_qosspecs(volume, volumeTypeId)
configGroup = None
# If there are no extra specs then the default case is assumed.
if extraSpecs:
configGroup = self.configuration.config_group
configurationFile = self._register_config_file_from_config_group(
configGroup)
return extraSpecs, configurationFile, qosSpecs
def _get_ecom_connection(self):
"""Get the ecom connection.
:returns: pywbem.WBEMConnection -- conn, the ecom connection
:raises: VolumeBackendAPIException
"""
ecomx509 = None
if self.ecomUseSSL:
if (self.configuration.safe_get('driver_client_cert_key') and
self.configuration.safe_get('driver_client_cert')):
ecomx509 = {"key_file":
self.configuration.safe_get(
'driver_client_cert_key'),
"cert_file":
self.configuration.safe_get(
'driver_client_cert')}
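            # pywbem's wbem_request is replaced with the EMC HTTPS variant
            # before opening the SSL connection below.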
pywbem.cim_http.wbem_request = emc_vmax_https.wbem_request
conn = pywbem.WBEMConnection(
self.url,
(self.user, self.passwd),
default_namespace='root/emc',
x509=ecomx509,
ca_certs=self.configuration.safe_get('driver_ssl_cert_path'),
no_verification=not self.configuration.safe_get(
'driver_ssl_cert_verify'))
else:
conn = pywbem.WBEMConnection(
self.url,
(self.user, self.passwd),
default_namespace='root/emc')
if conn is None:
exception_message = (_("Cannot connect to ECOM server."))
raise exception.VolumeBackendAPIException(data=exception_message)
return conn
def _find_pool_in_array(self, arrayStr, poolNameInStr, isV3):
"""Find a pool based on the pool name on a given array.
:param arrayStr: the array Serial number (String)
:param poolNameInStr: the name of the poolname (String)
        :param isV3: True/False
:returns: foundPoolInstanceName - the CIM Instance Name of the Pool
:returns: string -- systemNameStr
:raises: VolumeBackendAPIException
"""
foundPoolInstanceName = None
systemNameStr = None
storageSystemInstanceName = self.utils.find_storageSystem(
self.conn, arrayStr)
if isV3:
foundPoolInstanceName, systemNameStr = (
self.utils.get_pool_and_system_name_v3(
self.conn, storageSystemInstanceName, poolNameInStr))
else:
foundPoolInstanceName, systemNameStr = (
self.utils.get_pool_and_system_name_v2(
self.conn, storageSystemInstanceName, poolNameInStr))
if foundPoolInstanceName is None:
exceptionMessage = (_("Pool %(poolNameInStr)s is not found.")
% {'poolNameInStr': poolNameInStr})
LOG.error(exceptionMessage)
raise exception.VolumeBackendAPIException(data=exceptionMessage)
if systemNameStr is None:
exception_message = (_("Storage system not found for pool "
"%(poolNameInStr)s.")
% {'poolNameInStr': poolNameInStr})
LOG.error(exception_message)
raise exception.VolumeBackendAPIException(data=exception_message)
LOG.debug("Pool: %(pool)s SystemName: %(systemname)s.",
{'pool': foundPoolInstanceName,
'systemname': systemNameStr})
return foundPoolInstanceName, systemNameStr
def _find_lun(self, volume):
"""Given the volume get the instance from it.
:param volume: volume object
:returns: foundVolumeinstance
"""
foundVolumeinstance = None
volumename = volume['name']
loc = volume['provider_location']
if self.conn is None:
self.conn = self._get_ecom_connection()
if isinstance(loc, six.string_types):
name = ast.literal_eval(loc)
keys = name['keybindings']
systemName = keys['SystemName']
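            # For SMI-S versions above SMI_VERSION_8 the stored
            # 'SYMMETRIX+' prefix in provider_location is rewritten to
            # 'SYMMETRIX-+-' so the instance lookup matches the newer
            # naming.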
prefix1 = 'SYMMETRIX+'
prefix2 = 'SYMMETRIX-+-'
smiversion = self.utils.get_smi_version(self.conn)
if smiversion > SMI_VERSION_8 and prefix1 in systemName:
keys['SystemName'] = systemName.replace(prefix1, prefix2)
name['keybindings'] = keys
instancename = self.utils.get_instance_name(
name['classname'], name['keybindings'])
# Allow for an external app to delete the volume.
LOG.debug("Volume instance name: %(in)s",
{'in': instancename})
try:
foundVolumeinstance = self.conn.GetInstance(instancename)
except Exception:
foundVolumeinstance = None
if foundVolumeinstance is None:
LOG.debug("Volume %(volumename)s not found on the array.",
{'volumename': volumename})
else:
LOG.debug("Volume name: %(volumename)s Volume instance: "
"%(foundVolumeinstance)s.",
{'volumename': volumename,
'foundVolumeinstance': foundVolumeinstance})
return foundVolumeinstance
def _find_storage_sync_sv_sv(self, snapshot, volume, extraSpecs,
waitforsync=True):
"""Find the storage synchronized name.
:param snapshot: snapshot object
:param volume: volume object
:param extraSpecs: extra specifications
:param waitforsync: boolean -- Wait for Solutions Enabler sync.
:returns: string -- foundsyncname
:returns: string -- storage_system
"""
snapshotname = snapshot['name']
volumename = volume['name']
LOG.debug("Source: %(volumename)s Target: %(snapshotname)s.",
{'volumename': volumename, 'snapshotname': snapshotname})
snapshot_instance = self._find_lun(snapshot)
volume_instance = self._find_lun(volume)
storage_system = volume_instance['SystemName']
classname = 'SE_StorageSynchronized_SV_SV'
bindings = {'SyncedElement': snapshot_instance.path,
'SystemElement': volume_instance.path}
foundsyncname = self.utils.get_instance_name(classname, bindings)
if foundsyncname is None:
LOG.debug(
"Source: %(volumename)s Target: %(snapshotname)s. "
"Storage Synchronized not found.",
{'volumename': volumename,
'snapshotname': snapshotname})
else:
LOG.debug("Storage system: %(storage_system)s. "
"Storage Synchronized instance: %(sync)s.",
{'storage_system': storage_system,
'sync': foundsyncname})
# Wait for SE_StorageSynchronized_SV_SV to be fully synced.
if waitforsync:
self.utils.wait_for_sync(self.conn, foundsyncname,
extraSpecs)
return foundsyncname, storage_system
def _find_initiator_names(self, connector):
foundinitiatornames = []
iscsi = 'iscsi'
fc = 'fc'
name = 'initiator name'
if self.protocol.lower() == iscsi and connector['initiator']:
foundinitiatornames.append(connector['initiator'])
elif self.protocol.lower() == fc and connector['wwpns']:
for wwn in connector['wwpns']:
foundinitiatornames.append(wwn)
name = 'world wide port names'
if foundinitiatornames is None or len(foundinitiatornames) == 0:
msg = (_("Error finding %s.") % name)
LOG.error(msg)
raise exception.VolumeBackendAPIException(data=msg)
LOG.debug("Found %(name)s: %(initiator)s.",
{'name': name,
'initiator': foundinitiatornames})
return foundinitiatornames
def _wrap_find_device_number(self, volume, host):
return self.find_device_number(volume, host)
def find_device_number(self, volume, host):
"""Given the volume dict find a device number.
Find a device number that a host can see
for a volume.
:param volume: the volume dict
:param host: host from connector
:returns: dict -- the data dict
"""
maskedvols = []
data = {}
foundController = None
foundNumDeviceNumber = None
foundMaskingViewName = None
volumeName = volume['name']
volumeInstance = self._find_lun(volume)
storageSystemName = volumeInstance['SystemName']
unitnames = self.conn.ReferenceNames(
volumeInstance.path,
ResultClass='CIM_ProtocolControllerForUnit')
for unitname in unitnames:
controller = unitname['Antecedent']
classname = controller['CreationClassName']
index = classname.find('Symm_LunMaskingView')
if index > -1:
unitinstance = self.conn.GetInstance(unitname,
LocalOnly=False)
numDeviceNumber = int(unitinstance['DeviceNumber'], 16)
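                # DeviceNumber is a hex string, so parse it with base 16
                # to get the integer host LUN id.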
foundNumDeviceNumber = numDeviceNumber
foundController = controller
controllerInstance = self.conn.GetInstance(controller,
LocalOnly=False)
propertiesList = controllerInstance.properties.items()
for properties in propertiesList:
if properties[0] == 'ElementName':
cimProperties = properties[1]
foundMaskingViewName = cimProperties.value
devicedict = {'hostlunid': foundNumDeviceNumber,
'storagesystem': storageSystemName,
'maskingview': foundMaskingViewName,
'controller': foundController}
maskedvols.append(devicedict)
if not maskedvols:
LOG.debug(
"Device number not found for volume "
"%(volumeName)s %(volumeInstance)s.",
{'volumeName': volumeName,
'volumeInstance': volumeInstance.path})
else:
host = self.utils.get_host_short_name(host)
hoststr = ("-%(host)s-"
% {'host': host})
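            # The masking view name embeds "-<short host name>-" (see
            # _populate_masking_dict), so a substring match picks out the
            # views that belong to this host.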
for maskedvol in maskedvols:
if hoststr.lower() in maskedvol['maskingview'].lower():
data = maskedvol
if not data:
if len(maskedvols) > 0:
data = maskedvols[0]
LOG.warning(_LW(
"Volume is masked but not to host %(host)s as is "
"expected. Assuming live migration."),
{'host': hoststr})
LOG.debug("Device info: %(data)s.", {'data': data})
return data
def get_target_wwns(self, storageSystem, connector):
"""Find target WWNs.
:param storageSystem: the storage system name
:param connector: the connector dict
:returns: list -- targetWwns, the target WWN list
:raises: VolumeBackendAPIException
"""
targetWwns = set()
storageHardwareService = self.utils.find_storage_hardwareid_service(
self.conn, storageSystem)
hardwareIdInstances = self._find_storage_hardwareids(
connector, storageHardwareService)
LOG.debug(
"EMCGetTargetEndpoints: Service: %(service)s, "
"Storage HardwareIDs: %(hardwareIds)s.",
{'service': storageHardwareService,
'hardwareIds': hardwareIdInstances})
for hardwareIdInstance in hardwareIdInstances:
LOG.debug("HardwareID instance is: %(hardwareIdInstance)s.",
{'hardwareIdInstance': hardwareIdInstance})
try:
targetEndpoints = (
self.utils.get_target_endpoints(
self.conn, hardwareIdInstance))
if not targetEndpoints:
LOG.warning(_LW(
"Unable to get target endpoints for hardwareId "
"%(instance)s."),
{'instance': hardwareIdInstance})
continue
except Exception:
LOG.warning(_LW(
"Unable to get target endpoints for hardwareId "
"%(instance)s."),
{'instance': hardwareIdInstance}, exc_info=True)
continue
LOG.debug("There are %(len)lu endpoints.",
{'len': len(targetEndpoints)})
for targetendpoint in targetEndpoints:
wwn = targetendpoint['Name']
# Add target wwn to the list if it is not already there.
targetWwns.add(wwn)
break
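        # Note: the loop breaks after the first hardware ID that yields
        # endpoints, so only that initiator's target ports are collected.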
if not targetWwns:
exception_message = (_(
"Unable to get target endpoints for any hardwareIds."))
raise exception.VolumeBackendAPIException(data=exception_message)
LOG.debug("Target WWNs: %(targetWwns)s.",
{'targetWwns': targetWwns})
return list(targetWwns)
def _find_storage_hardwareids(
self, connector, hardwareIdManagementService):
"""Find the storage hardware ID instances.
:param connector: the connector dict
:param hardwareIdManagementService: the storage Hardware
management service
:returns: list -- the list of storage hardware ID instances
"""
foundHardwareIdList = []
wwpns = self._find_initiator_names(connector)
hardwareIdInstances = (
self.utils.get_hardware_id_instances_from_array(
self.conn, hardwareIdManagementService))
for hardwareIdInstance in hardwareIdInstances:
storageId = hardwareIdInstance['StorageID']
for wwpn in wwpns:
if wwpn.lower() == storageId.lower():
# Check that the found hardwareId has not been
# deleted. If it has, we don't want to add it to the list.
instance = self.utils.get_existing_instance(
self.conn, hardwareIdInstance.path)
if instance is None:
# HardwareId doesn't exist any more. Skip it.
break
foundHardwareIdList.append(hardwareIdInstance.path)
break
LOG.debug("Storage Hardware IDs for %(wwpns)s is "
"%(foundInstances)s.",
{'wwpns': wwpns,
'foundInstances': foundHardwareIdList})
return foundHardwareIdList
def _register_config_file_from_config_group(self, configGroupName):
"""Given the config group name register the file.
:param configGroupName: the config group name
:returns: string -- configurationFile - name of the configuration file
"""
if configGroupName is None:
return CINDER_EMC_CONFIG_FILE
if hasattr(self.configuration, 'cinder_emc_config_file'):
configurationFile = self.configuration.cinder_emc_config_file
else:
configurationFile = (
("%(prefix)s%(configGroupName)s%(postfix)s"
% {'prefix': CINDER_EMC_CONFIG_FILE_PREFIX,
'configGroupName': configGroupName,
'postfix': CINDER_EMC_CONFIG_FILE_POSTFIX}))
# The file saved in self.configuration may not be the correct one,
# double check.
if configGroupName not in configurationFile:
configurationFile = (
("%(prefix)s%(configGroupName)s%(postfix)s"
% {'prefix': CINDER_EMC_CONFIG_FILE_PREFIX,
'configGroupName': configGroupName,
'postfix': CINDER_EMC_CONFIG_FILE_POSTFIX}))
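        # Example (assuming the usual prefix/postfix constants): config
        # group "conf_group_1" maps to something like
        # "/etc/cinder/cinder_emc_config_conf_group_1.xml".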
if os.path.isfile(configurationFile):
LOG.debug("Configuration file : %(configurationFile)s exists.",
{'configurationFile': configurationFile})
else:
exceptionMessage = (_(
"Configuration file %(configurationFile)s does not exist.")
% {'configurationFile': configurationFile})
LOG.error(exceptionMessage)
raise exception.VolumeBackendAPIException(data=exceptionMessage)
return configurationFile
def _set_ecom_credentials(self, arrayInfo):
"""Given the array record set the ecom credentials.
:param arrayInfo: record
:raises: VolumeBackendAPIException
"""
ip = arrayInfo['EcomServerIp']
port = arrayInfo['EcomServerPort']
self.user = arrayInfo['EcomUserName']
self.passwd = arrayInfo['EcomPassword']
self.ecomUseSSL = self.configuration.safe_get('driver_use_ssl')
ip_port = ("%(ip)s:%(port)s"
% {'ip': ip,
'port': port})
if self.ecomUseSSL:
self.url = ("https://%(ip_port)s"
% {'ip_port': ip_port})
else:
self.url = ("http://%(ip_port)s"
% {'ip_port': ip_port})
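        # Example (hypothetical): EcomServerIp 10.0.0.1 with port 5988 or
        # 5989 yields "http://10.0.0.1:5988" or "https://10.0.0.1:5989",
        # depending on driver_use_ssl.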
self.conn = self._get_ecom_connection()
def _initial_setup(self, volume, volumeTypeId=None):
"""Necessary setup to accumulate the relevant information.
The volume object has a host in which we can parse the
config group name. The config group name is the key to our EMC
configuration file. The emc configuration file contains pool name
and array name which are mandatory fields.
FastPolicy is optional.
StripedMetaCount is an extra spec that determines whether
the composite volume should be concatenated or striped.
:param volume: the volume Object
:param volumeTypeId: Optional override of volume['volume_type_id']
:returns: dict -- extra spec dict
:raises: VolumeBackendAPIException
"""
try:
extraSpecs, configurationFile, qosSpecs = (
self._set_config_file_and_get_extra_specs(
volume, volumeTypeId))
pool = self._validate_pool(volume)
LOG.debug("Pool returned is %(pool)s.",
{'pool': pool})
arrayInfo = self.utils.parse_file_to_get_array_map(
configurationFile)
poolRecord = self.utils.extract_record(arrayInfo, pool)
if not poolRecord:
exceptionMessage = (_(
"Unable to get corresponding record for pool."))
raise exception.VolumeBackendAPIException(
data=exceptionMessage)
self._set_ecom_credentials(poolRecord)
isV3 = self.utils.isArrayV3(
self.conn, poolRecord['SerialNumber'])
if isV3:
extraSpecs = self._set_v3_extra_specs(extraSpecs, poolRecord)
else:
# V2 extra specs
extraSpecs = self._set_v2_extra_specs(extraSpecs, poolRecord)
            if (qosSpecs.get('qos_specs')
                    and qosSpecs['qos_specs']['consumer'] != "front-end"):
extraSpecs['qos'] = qosSpecs['qos_specs']['specs']
except Exception:
import sys
exceptionMessage = (_(
"Unable to get configuration information necessary to "
"create a volume: %(errorMessage)s.")
% {'errorMessage': sys.exc_info()[1]})
raise exception.VolumeBackendAPIException(data=exceptionMessage)
return extraSpecs
def _get_pool_and_storage_system(self, extraSpecs):
"""Given the extra specs get the pool and storage system name.
:param extraSpecs: extra specifications
:returns: poolInstanceName The pool instance name
:returns: string -- the storage system name
:raises: VolumeBackendAPIException
"""
try:
array = extraSpecs[ARRAY]
poolInstanceName, storageSystemStr = self._find_pool_in_array(
array, extraSpecs[POOL], extraSpecs[ISV3])
except Exception:
exceptionMessage = (_(
"You must supply an array in your EMC configuration file."))
LOG.error(exceptionMessage)
raise exception.VolumeBackendAPIException(data=exceptionMessage)
if poolInstanceName is None or storageSystemStr is None:
exceptionMessage = (_(
"Cannot get necessary pool or storage system information."))
LOG.error(exceptionMessage)
raise exception.VolumeBackendAPIException(data=exceptionMessage)
return poolInstanceName, storageSystemStr
def _populate_masking_dict(self, volume, connector, extraSpecs):
"""Get all the names of the maskingView and subComponents.
:param volume: the volume object
:param connector: the connector object
:param extraSpecs: extra specifications
:returns: dict -- a dictionary with masking view information
"""
maskingViewDict = {}
hostName = connector['host']
uniqueName = self.utils.generate_unique_trunc_pool(extraSpecs[POOL])
isV3 = extraSpecs[ISV3]
maskingViewDict['isV3'] = isV3
protocol = self.utils.get_short_protocol_type(self.protocol)
shortHostName = self.utils.get_host_short_name(hostName)
if isV3:
slo = extraSpecs[SLO]
workload = extraSpecs[WORKLOAD]
maskingViewDict['slo'] = slo
maskingViewDict['workload'] = workload
maskingViewDict['pool'] = uniqueName
if slo:
prefix = (
("OS-%(shortHostName)s-%(poolName)s-%(slo)s-%(workload)s"
% {'shortHostName': shortHostName,
'poolName': uniqueName,
'slo': slo,
'workload': workload}))
else:
prefix = (
("OS-%(shortHostName)s-No_SLO"
% {'shortHostName': shortHostName}))
else:
maskingViewDict['fastPolicy'] = extraSpecs[FASTPOLICY]
if maskingViewDict['fastPolicy']:
uniqueName = self.utils.generate_unique_trunc_fastpolicy(
maskingViewDict['fastPolicy']) + '-FP'
prefix = (
("OS-%(shortHostName)s-%(poolName)s-%(protocol)s"
% {'shortHostName': shortHostName,
'poolName': uniqueName,
'protocol': protocol}))
maskingViewDict['sgGroupName'] = ("%(prefix)s-SG"
% {'prefix': prefix})
maskingViewDict['maskingViewName'] = ("%(prefix)s-MV"
% {'prefix': prefix})
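        # Example (hypothetical): for host "myhost", pool "SRP_1", SLO
        # "Diamond" and workload "OLTP", the V3 path yields names like
        # "OS-myhost-SRP_1-Diamond-OLTP-SG" and "...-MV".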
volumeName = volume['name']
volumeInstance = self._find_lun(volume)
storageSystemName = volumeInstance['SystemName']
maskingViewDict['controllerConfigService'] = (
self.utils.find_controller_configuration_service(
self.conn, storageSystemName))
        # The portGroup is taken from the EMC XML config file.
maskingViewDict['pgGroupName'] = extraSpecs[PORTGROUPNAME]
maskingViewDict['igGroupName'] = (
("OS-%(shortHostName)s-%(protocol)s-IG"
% {'shortHostName': shortHostName,
'protocol': protocol}))
maskingViewDict['connector'] = connector
maskingViewDict['volumeInstance'] = volumeInstance
maskingViewDict['volumeName'] = volumeName
maskingViewDict['storageSystemName'] = storageSystemName
return maskingViewDict
def _add_volume_to_default_storage_group_on_create(
self, volumeDict, volumeName, storageConfigService,
storageSystemName, fastPolicyName, extraSpecs):
"""Add the volume to the default storage group for that policy.
        On create, when a FAST policy is enabled, add the volume to the
        default storage group for that policy. If it fails, do the
        necessary rollback.
:param volumeDict: the volume dictionary
:param volumeName: the volume name (String)
:param storageConfigService: the storage configuration service
:param storageSystemName: the storage system name (String)
:param fastPolicyName: the fast policy name (String)
:param extraSpecs: extra specifications
:returns: dict -- maskingViewDict with masking view information
:raises: VolumeBackendAPIException
"""
try:
volumeInstance = self.utils.find_volume_instance(
self.conn, volumeDict, volumeName)
controllerConfigurationService = (
self.utils.find_controller_configuration_service(
self.conn, storageSystemName))
defaultSgName = self.fast.format_default_sg_string(fastPolicyName)
self.fast.add_volume_to_default_storage_group_for_fast_policy(
self.conn, controllerConfigurationService, volumeInstance,
volumeName, fastPolicyName, extraSpecs)
foundStorageGroupInstanceName = (
self.utils.get_storage_group_from_volume(
self.conn, volumeInstance.path, defaultSgName))
if foundStorageGroupInstanceName is None:
exceptionMessage = (_(
"Error adding Volume: %(volumeName)s "
"with instance path: %(volumeInstancePath)s.")
% {'volumeName': volumeName,
'volumeInstancePath': volumeInstance.path})
LOG.error(exceptionMessage)
raise exception.VolumeBackendAPIException(
data=exceptionMessage)
except Exception:
# Rollback by deleting the volume if adding the volume to the
# default storage group were to fail.
errorMessage = (_(
"Rolling back %(volumeName)s by deleting it.")
% {'volumeName': volumeName})
LOG.exception(errorMessage)
self.provision.delete_volume_from_pool(
self.conn, storageConfigService, volumeInstance.path,
volumeName, extraSpecs)
raise exception.VolumeBackendAPIException(data=errorMessage)
def _create_and_get_unbound_volume(
self, conn, storageConfigService, compositeVolumeInstanceName,
additionalSize, extraSpecs):
"""Create an unbound volume.
Create an unbound volume so it is in the correct state to add to a
composite volume.
:param conn: the connection information to the ecom server
:param storageConfigService: the storage config service instance name
:param compositeVolumeInstanceName: the composite volume instance name
:param additionalSize: the size you want to increase the volume by
:param extraSpecs: extra specifications
:returns: volume instance modifiedCompositeVolumeInstance
"""
assocPoolInstanceName = self.utils.get_assoc_pool_from_volume(
conn, compositeVolumeInstanceName)
appendVolumeInstance = self._create_and_get_volume_instance(
conn, storageConfigService, assocPoolInstanceName, 'appendVolume',
additionalSize, extraSpecs)
isVolumeBound = self.utils.is_volume_bound_to_pool(
conn, appendVolumeInstance)
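        # is_volume_bound_to_pool reports 'True'/'False' as a string,
        # hence the substring check rather than a boolean comparison.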
if 'True' in isVolumeBound:
appendVolumeInstance = (
self._unbind_and_get_volume_from_storage_pool(
conn, storageConfigService,
appendVolumeInstance.path, 'appendVolume', extraSpecs))
return appendVolumeInstance
def _create_and_get_volume_instance(
self, conn, storageConfigService, poolInstanceName,
volumeName, volumeSize, extraSpecs):
"""Create and get a new volume.
:param conn: the connection information to the ecom server
:param storageConfigService: the storage config service instance name
:param poolInstanceName: the pool instance name
:param volumeName: the volume name
:param volumeSize: the size to create the volume
:param extraSpecs: extra specifications
:returns: volumeInstance -- the volume instance
"""
volumeDict, _rc = (
self.provision.create_volume_from_pool(
self.conn, storageConfigService, volumeName, poolInstanceName,
volumeSize, extraSpecs))
volumeInstance = self.utils.find_volume_instance(
self.conn, volumeDict, volumeName)
return volumeInstance
def _unbind_and_get_volume_from_storage_pool(
self, conn, storageConfigService,
volumeInstanceName, volumeName, extraSpecs):
"""Unbind a volume from a pool and return the unbound volume.
:param conn: the connection information to the ecom server
:param storageConfigService: the storage config service instance name
:param volumeInstanceName: the volume instance name
:param volumeName: string the volumeName
:param extraSpecs: extra specifications
:returns: unboundVolumeInstance -- the unbound volume instance
"""
_rc, _job = (
self.provision.unbind_volume_from_storage_pool(
conn, storageConfigService, volumeInstanceName,
volumeName, extraSpecs))
        # Check that the volume is unbound.
volumeInstance = conn.GetInstance(volumeInstanceName)
isVolumeBound = self.utils.is_volume_bound_to_pool(
conn, volumeInstance)
if 'False' not in isVolumeBound:
exceptionMessage = (_(
"Failed to unbind volume %(volume)s")
% {'volume': volumeInstanceName})
LOG.error(exceptionMessage)
raise exception.VolumeBackendAPIException(data=exceptionMessage)
return volumeInstance
def _modify_and_get_composite_volume_instance(
self, conn, elementCompositionServiceInstanceName, volumeInstance,
appendVolumeInstanceName, volumeName, compositeType, extraSpecs):
"""Given an existing composite volume add a new composite volume to it.
:param conn: the connection information to the ecom server
:param elementCompositionServiceInstanceName: the storage element
composition service instance name
:param volumeInstance: the volume instance
:param appendVolumeInstanceName: the appended volume instance name
:param volumeName: the volume name
:param compositeType: concatenated
:param extraSpecs: extra specifications
:returns: int -- the return code
:returns: dict -- modifiedVolumeDict - the modified volume dict
"""
isComposite = self.utils.check_if_volume_is_composite(
self.conn, volumeInstance)
if 'True' in isComposite:
rc, job = self.provision.modify_composite_volume(
conn, elementCompositionServiceInstanceName,
volumeInstance.path, appendVolumeInstanceName, extraSpecs)
elif 'False' in isComposite:
rc, job = self.provision.create_new_composite_volume(
conn, elementCompositionServiceInstanceName,
volumeInstance.path, appendVolumeInstanceName, compositeType,
extraSpecs)
        else:
            exceptionMessage = (_(
                "Unable to determine whether %(volumeName)s is "
                "composite or not.")
                % {'volumeName': volumeName})
            LOG.error(exceptionMessage)
            raise exception.VolumeBackendAPIException(data=exceptionMessage)
modifiedVolumeDict = self.provision.get_volume_dict_from_job(
conn, job['Job'])
return rc, modifiedVolumeDict
def _get_or_create_default_storage_group(
self, conn, storageSystemName, volumeDict, volumeName,
fastPolicyName, extraSpecs):
"""Get or create a default storage group for a fast policy.
:param conn: the connection information to the ecom server
:param storageSystemName: the storage system name
:param volumeDict: the volume dictionary
:param volumeName: the volume name
:param fastPolicyName: the fast policy name
:param extraSpecs: extra specifications
:returns: defaultStorageGroupInstanceName
"""
controllerConfigService = (
self.utils.find_controller_configuration_service(
self.conn, storageSystemName))
volumeInstance = self.utils.find_volume_instance(
self.conn, volumeDict, volumeName)
defaultStorageGroupInstanceName = (
self.fast.get_or_create_default_storage_group(
self.conn, controllerConfigService, fastPolicyName,
volumeInstance, extraSpecs))
return defaultStorageGroupInstanceName
def _create_cloned_volume(
self, cloneVolume, sourceVolume, extraSpecs, isSnapshot=False):
"""Create a clone volume from the source volume.
:param cloneVolume: clone volume
:param sourceVolume: source of the clone volume
:param extraSpecs: extra specs
:param isSnapshot: boolean -- Defaults to False
:returns: dict -- cloneDict the cloned volume dictionary
:raises: VolumeBackendAPIException
"""
sourceName = sourceVolume['name']
cloneName = cloneVolume['name']
LOG.info(_LI(
"Create a replica from Volume: Clone Volume: %(cloneName)s "
"Source Volume: %(sourceName)s."),
{'cloneName': cloneName,
'sourceName': sourceName})
self.conn = self._get_ecom_connection()
sourceInstance = self._find_lun(sourceVolume)
storageSystem = sourceInstance['SystemName']
repServCapabilityInstanceName = (
self.utils.find_replication_service_capabilities(self.conn,
storageSystem))
is_clone_license = self.utils.is_clone_licensed(
self.conn, repServCapabilityInstanceName, extraSpecs[ISV3])
if is_clone_license is False:
exceptionMessage = (_(
"Clone feature is not licensed on %(storageSystem)s.")
% {'storageSystem': storageSystem})
LOG.error(exceptionMessage)
raise exception.VolumeBackendAPIException(data=exceptionMessage)
repServiceInstanceName = self.utils.find_replication_service(
self.conn, storageSystem)
LOG.debug("Create volume replica: Volume: %(cloneName)s "
"Source Volume: %(sourceName)s "
"Method: CreateElementReplica "
"ReplicationService: %(service)s ElementName: "
"%(elementname)s SyncType: 8 SourceElement: "
"%(sourceelement)s.",
{'cloneName': cloneName,
'sourceName': sourceName,
'service': repServiceInstanceName,
'elementname': cloneName,
'sourceelement': sourceInstance.path})
if extraSpecs[ISV3]:
rc, cloneDict = self._create_replica_v3(repServiceInstanceName,
cloneVolume,
sourceVolume,
sourceInstance,
isSnapshot,
extraSpecs)
else:
rc, cloneDict = self._create_clone_v2(repServiceInstanceName,
cloneVolume,
sourceVolume,
sourceInstance,
isSnapshot,
extraSpecs)
if not isSnapshot:
old_size_gbs = self.utils.convert_bits_to_gbs(
self.utils.get_volume_size(
self.conn, sourceInstance))
if cloneVolume['size'] != old_size_gbs:
LOG.info(_LI("Extending clone %(cloneName)s to "
"%(newSize)d GBs"),
{'cloneName': cloneName,
'newSize': cloneVolume['size']})
cloneInstance = self.utils.find_volume_instance(
self.conn, cloneDict, cloneName)
self._extend_volume(
cloneInstance, cloneName, cloneVolume['size'],
old_size_gbs, extraSpecs)
LOG.debug("Leaving _create_cloned_volume: Volume: "
"%(cloneName)s Source Volume: %(sourceName)s "
"Return code: %(rc)lu.",
{'cloneName': cloneName,
'sourceName': sourceName,
'rc': rc})
# Adding version information
cloneDict['version'] = self.version
return cloneDict
def _add_clone_to_default_storage_group(
self, fastPolicyName, storageSystemName, cloneDict, cloneName,
extraSpecs):
"""Helper function to add clone to the default storage group.
:param fastPolicyName: the fast policy name
:param storageSystemName: the storage system name
:param cloneDict: clone dictionary
:param cloneName: clone name
:param extraSpecs: extra specifications
:raises: VolumeBackendAPIException
"""
        # Check if the clone/snapshot volume is already in the default SG.
cloneInstance = self.utils.find_volume_instance(
self.conn, cloneDict, cloneName)
if self.fast.is_volume_in_default_SG(self.conn, cloneInstance.path):
return
# If FAST enabled place clone volume or volume from snapshot to
# default storage group.
LOG.debug("Adding volume: %(cloneName)s to default storage group "
"for FAST policy: %(fastPolicyName)s.",
{'cloneName': cloneName,
'fastPolicyName': fastPolicyName})
storageConfigService = (
self.utils.find_storage_configuration_service(
self.conn, storageSystemName))
defaultStorageGroupInstanceName = (
self._get_or_create_default_storage_group(
self.conn, storageSystemName, cloneDict, cloneName,
fastPolicyName, extraSpecs))
if defaultStorageGroupInstanceName is None:
exceptionMessage = (_(
"Unable to create or get default storage group for FAST "
"policy: %(fastPolicyName)s.")
% {'fastPolicyName': fastPolicyName})
LOG.error(exceptionMessage)
raise exception.VolumeBackendAPIException(
data=exceptionMessage)
self._add_volume_to_default_storage_group_on_create(
cloneDict, cloneName, storageConfigService, storageSystemName,
fastPolicyName, extraSpecs)
def _delete_volume(self, volume):
"""Helper function to delete the specified volume.
:param volume: volume object to be deleted
:returns: tuple -- rc (int return code), volumeName (string vol name)
"""
volumeName = volume['name']
rc = -1
errorRet = (rc, volumeName)
extraSpecs = self._initial_setup(volume)
self.conn = self._get_ecom_connection()
volumeInstance = self._find_lun(volume)
if volumeInstance is None:
LOG.error(_LE(
"Volume %(name)s not found on the array. "
"No volume to delete."),
{'name': volumeName})
return errorRet
storageConfigService = self.utils.find_storage_configuration_service(
self.conn, volumeInstance['SystemName'])
deviceId = volumeInstance['DeviceID']
if extraSpecs[ISV3]:
rc = self._delete_from_pool_v3(
storageConfigService, volumeInstance, volumeName,
deviceId, extraSpecs)
else:
rc = self._delete_from_pool(storageConfigService, volumeInstance,
volumeName, deviceId,
extraSpecs[FASTPOLICY],
extraSpecs)
return (rc, volumeName)
def _remove_device_from_storage_group(
self, controllerConfigurationService, volumeInstanceName,
volumeName, extraSpecs):
"""Check if volume is part of a storage group prior to delete.
Log a warning if volume is part of storage group.
:param controllerConfigurationService: controller configuration service
:param volumeInstanceName: volume instance name
:param volumeName: volume name (string)
:param extraSpecs: extra specifications
"""
storageGroupInstanceNames = (
self.masking.get_associated_masking_groups_from_device(
self.conn, volumeInstanceName))
if storageGroupInstanceNames:
LOG.warning(_LW(
"Pre check for deletion. "
"Volume: %(volumeName)s is part of a storage group. "
"Attempting removal from %(storageGroupInstanceNames)s."),
{'volumeName': volumeName,
'storageGroupInstanceNames': storageGroupInstanceNames})
for storageGroupInstanceName in storageGroupInstanceNames:
self.provision.remove_device_from_storage_group(
self.conn, controllerConfigurationService,
storageGroupInstanceName,
volumeInstanceName, volumeName, extraSpecs)
def _find_lunmasking_scsi_protocol_controller(self, storageSystemName,
connector):
"""Find LunMaskingSCSIProtocolController for the local host.
Find out how many volumes are mapped to a host
associated to the LunMaskingSCSIProtocolController.
:param storageSystemName: the storage system name
        :param connector: the connector object
:returns: foundControllerInstanceName
"""
foundControllerInstanceName = None
initiators = self._find_initiator_names(connector)
storageSystemInstanceName = self.utils.find_storageSystem(
self.conn, storageSystemName)
controllerInstanceNames = self.conn.AssociatorNames(
storageSystemInstanceName,
ResultClass='EMC_LunMaskingSCSIProtocolController')
for controllerInstanceName in controllerInstanceNames:
try:
# This is a check to see if the controller has
# been deleted.
self.conn.GetInstance(controllerInstanceName)
storageHardwareIdInstances = self.conn.Associators(
controllerInstanceName,
ResultClass='EMC_StorageHardwareID')
for storageHardwareIdInstance in storageHardwareIdInstances:
# If EMC_StorageHardwareID matches the initiator, we
# found the existing EMC_LunMaskingSCSIProtocolController.
hardwareid = storageHardwareIdInstance['StorageID']
for initiator in initiators:
if hardwareid.lower() == initiator.lower():
# This is a check to see if the controller
# has been deleted.
instance = self.utils.get_existing_instance(
self.conn, controllerInstanceName)
if instance is None:
# Skip this controller as it doesn't exist
# any more.
pass
else:
foundControllerInstanceName = (
controllerInstanceName)
break
if foundControllerInstanceName is not None:
break
except pywbem.cim_operations.CIMError as arg:
instance = self.utils.process_exception_args(
arg, controllerInstanceName)
if instance is None:
# Skip this controller as it doesn't exist any more.
pass
if foundControllerInstanceName is not None:
break
LOG.debug("LunMaskingSCSIProtocolController for storage system "
"%(storage_system)s and initiator %(initiator)s is "
"%(ctrl)s.",
{'storage_system': storageSystemName,
'initiator': initiators,
'ctrl': foundControllerInstanceName})
return foundControllerInstanceName
def get_num_volumes_mapped(self, volume, connector):
"""Returns how many volumes are in the same zone as the connector.
Find out how many volumes are mapped to a host
associated to the LunMaskingSCSIProtocolController.
:param volume: volume object to be deleted
        :param connector: the connector object
:returns: int -- numVolumesMapped
:raises: VolumeBackendAPIException
"""
volumename = volume['name']
vol_instance = self._find_lun(volume)
if vol_instance is None:
msg = (_("Volume %(name)s not found on the array. "
"Cannot determine if there are volumes mapped.")
% {'name': volumename})
LOG.error(msg)
raise exception.VolumeBackendAPIException(data=msg)
storage_system = vol_instance['SystemName']
ctrl = self._find_lunmasking_scsi_protocol_controller(
storage_system,
connector)
LOG.debug("LunMaskingSCSIProtocolController for storage system "
"%(storage)s and %(connector)s is %(ctrl)s.",
{'storage': storage_system,
'connector': connector,
'ctrl': ctrl})
# Return 0 if masking view does not exist.
if ctrl is None:
return 0
associators = self.conn.Associators(
ctrl,
ResultClass='EMC_StorageVolume')
numVolumesMapped = len(associators)
LOG.debug("Found %(numVolumesMapped)d volumes on storage system "
"%(storage)s mapped to %(connector)s.",
{'numVolumesMapped': numVolumesMapped,
'storage': storage_system,
'connector': connector})
return numVolumesMapped
def _delete_snapshot(self, snapshot):
"""Helper function to delete the specified snapshot.
:param snapshot: snapshot object to be deleted
:raises: VolumeBackendAPIException
"""
LOG.debug("Entering delete_snapshot.")
snapshotname = snapshot['name']
LOG.info(_LI("Delete Snapshot: %(snapshot)s."),
{'snapshot': snapshotname})
extraSpecs = self._initial_setup(snapshot)
self.conn = self._get_ecom_connection()
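        # For V2 arrays the snapshot may still be the target of a sync
        # session; break that relationship before deleting the target
        # device below. For V3 the code goes straight to the delete.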
if not extraSpecs[ISV3]:
snapshotInstance = self._find_lun(snapshot)
if snapshotInstance is None:
LOG.error(_LE(
"Snapshot %(snapshotname)s not found on the array. "
"No volume to delete."),
{'snapshotname': snapshotname})
return (-1, snapshotname)
storageSystem = snapshotInstance['SystemName']
# Wait for it to fully sync in case there is an ongoing
# create volume from snapshot request.
syncName = self.utils.find_sync_sv_by_target(
self.conn, storageSystem, snapshotInstance, extraSpecs,
True)
if syncName is None:
LOG.info(_LI(
"Snapshot: %(snapshot)s: not found on the array."),
{'snapshot': snapshotname})
else:
repservice = self.utils.find_replication_service(self.conn,
storageSystem)
if repservice is None:
exception_message = _(
"Cannot find Replication Service to"
" delete snapshot %s.") % snapshotname
raise exception.VolumeBackendAPIException(
data=exception_message)
# Break the replication relationship
LOG.debug("Deleting snap relationship: Target: %(snapshot)s "
"Method: ModifyReplicaSynchronization "
"Replication Service: %(service)s Operation: 8 "
"Synchronization: %(syncName)s.",
{'snapshot': snapshotname,
'service': repservice,
'syncName': syncName})
self.provision.delete_clone_relationship(
self.conn, repservice, syncName, extraSpecs, True)
# Delete the target device.
self._delete_volume(snapshot)
def create_consistencygroup(self, context, group):
"""Creates a consistency group.
:param context: the context
:param group: the group object to be created
:returns: dict -- modelUpdate = {'status': 'available'}
:raises: VolumeBackendAPIException
"""
LOG.info(_LI("Create Consistency Group: %(group)s."),
{'group': group['id']})
modelUpdate = {'status': fields.ConsistencyGroupStatus.AVAILABLE}
volumeTypeId = group['volume_type_id'].replace(",", "")
cgName = self.utils.truncate_string(group['id'], 8)
extraSpecs = self._initial_setup(None, volumeTypeId)
_poolInstanceName, storageSystem = (
self._get_pool_and_storage_system(extraSpecs))
self.conn = self._get_ecom_connection()
# Find storage system.
try:
replicationService = self.utils.find_replication_service(
self.conn, storageSystem)
self.provision.create_consistency_group(
self.conn, replicationService, cgName, extraSpecs)
except Exception:
exceptionMessage = (_("Failed to create consistency group:"
" %(cgName)s.")
% {'cgName': cgName})
LOG.exception(exceptionMessage)
raise exception.VolumeBackendAPIException(data=exceptionMessage)
return modelUpdate
def delete_consistencygroup(self, context, group, volumes):
"""Deletes a consistency group.
:param context: the context
:param group: the group object to be deleted
:param volumes: the list of volumes in the consisgroup to be deleted
:returns: dict -- modelUpdate
:returns: list -- list of volume objects
:raises: VolumeBackendAPIException
"""
LOG.info(_LI("Delete Consistency Group: %(group)s."),
{'group': group['id']})
cgName = self.utils.truncate_string(group['id'], 8)
modelUpdate = {}
modelUpdate['status'] = group['status']
volumeTypeId = group['volume_type_id'].replace(",", "")
extraSpecs = self._initial_setup(None, volumeTypeId)
_poolInstanceName, storageSystem = (
self._get_pool_and_storage_system(extraSpecs))
try:
replicationService = self.utils.find_replication_service(
self.conn, storageSystem)
storageConfigservice = (
self.utils.find_storage_configuration_service(
self.conn, storageSystem))
cgInstanceName = self._find_consistency_group(
replicationService, cgName)
if cgInstanceName is None:
exception_message = (_("Cannot find CG group %s.") %
cgName)
raise exception.VolumeBackendAPIException(
data=exception_message)
memberInstanceNames = self._get_members_of_replication_group(
cgInstanceName)
self.provision.delete_consistency_group(self.conn,
replicationService,
cgInstanceName, cgName,
extraSpecs)
# Do a bulk delete, a lot faster than single deletes.
if memberInstanceNames:
volumes, modelUpdate = self._do_bulk_delete(
storageSystem, memberInstanceNames, storageConfigservice,
volumes, modelUpdate, extraSpecs[ISV3], extraSpecs)
except Exception:
exceptionMessage = (_(
"Failed to delete consistency group: %(cgName)s.")
% {'cgName': cgName})
LOG.exception(exceptionMessage)
raise exception.VolumeBackendAPIException(data=exceptionMessage)
return modelUpdate, volumes
def _do_bulk_delete(self, storageSystem, memberInstanceNames,
storageConfigservice, volumes, modelUpdate, isV3,
extraSpecs):
"""Do a bulk delete.
:param storageSystem: storage system name
:param memberInstanceNames: volume Instance names
:param storageConfigservice: storage config service
:param volumes: volume objects
:param modelUpdate: dict
:param isV3: boolean
:param extraSpecs: extra specifications
:returns: list -- list of volume objects
:returns: dict -- modelUpdate
"""
try:
controllerConfigurationService = (
self.utils.find_controller_configuration_service(
self.conn, storageSystem))
for memberInstanceName in memberInstanceNames:
self._remove_device_from_storage_group(
controllerConfigurationService, memberInstanceName,
'Member Volume', extraSpecs)
if isV3:
self.provisionv3.delete_volume_from_pool(
self.conn, storageConfigservice,
memberInstanceNames, None, extraSpecs)
else:
self.provision.delete_volume_from_pool(
self.conn, storageConfigservice,
memberInstanceNames, None, extraSpecs)
for volumeRef in volumes:
volumeRef['status'] = 'deleted'
except Exception:
for volumeRef in volumes:
volumeRef['status'] = 'error_deleting'
modelUpdate['status'] = 'error_deleting'
return volumes, modelUpdate
def create_cgsnapshot(self, context, cgsnapshot, snapshots):
"""Creates a cgsnapshot.
:param context: the context
:param cgsnapshot: the consistency group snapshot to be created
:param snapshots: snapshots
:returns: dict -- modelUpdate
:returns: list -- list of snapshots
:raises: VolumeBackendAPIException
"""
consistencyGroup = cgsnapshot.get('consistencygroup')
snapshots_model_update = []
LOG.info(_LI(
"Create snapshot for Consistency Group %(cgId)s "
"cgsnapshotID: %(cgsnapshot)s."),
{'cgsnapshot': cgsnapshot['id'],
'cgId': cgsnapshot['consistencygroup_id']})
cgName = self.utils.truncate_string(
cgsnapshot['consistencygroup_id'], 8)
volumeTypeId = consistencyGroup['volume_type_id'].replace(",", "")
extraSpecs = self._initial_setup(None, volumeTypeId)
self.conn = self._get_ecom_connection()
_poolInstanceName, storageSystem = (
self._get_pool_and_storage_system(extraSpecs))
try:
replicationService = self.utils.find_replication_service(
self.conn, storageSystem)
cgInstanceName = (
self._find_consistency_group(replicationService, cgName))
if cgInstanceName is None:
exception_message = (_("Cannot find CG group %s.") % cgName)
raise exception.VolumeBackendAPIException(
data=exception_message)
memberInstanceNames = self._get_members_of_replication_group(
cgInstanceName)
# Create the target consistency group.
targetCgName = self.utils.truncate_string(cgsnapshot['id'], 8)
self.provision.create_consistency_group(
self.conn, replicationService, targetCgName, extraSpecs)
targetCgInstanceName = self._find_consistency_group(
replicationService, targetCgName)
LOG.info(_LI("Create target consistency group %(targetCg)s."),
{'targetCg': targetCgInstanceName})
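            # For each member of the source CG, create a target volume of
            # matching size and add it to the target consistency group.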
for memberInstanceName in memberInstanceNames:
volInstance = self.conn.GetInstance(
memberInstanceName, LocalOnly=False)
numOfBlocks = volInstance['NumberOfBlocks']
blockSize = volInstance['BlockSize']
volumeSizeInbits = numOfBlocks * blockSize
targetVolumeName = 'targetVol'
volume = {'size': int(self.utils.convert_bits_to_gbs(
volumeSizeInbits))}
if extraSpecs[ISV3]:
_rc, volumeDict, _storageSystemName = (
self._create_v3_volume(
volume, targetVolumeName, volumeSizeInbits,
extraSpecs))
else:
_rc, volumeDict, _storageSystemName = (
self._create_composite_volume(
volume, targetVolumeName, volumeSizeInbits,
extraSpecs))
targetVolumeInstance = self.utils.find_volume_instance(
self.conn, volumeDict, targetVolumeName)
LOG.debug("Create target volume for member volume "
"Source volume: %(memberVol)s "
"Target volume %(targetVol)s.",
{'memberVol': memberInstanceName,
'targetVol': targetVolumeInstance.path})
self.provision.add_volume_to_cg(self.conn,
replicationService,
targetCgInstanceName,
targetVolumeInstance.path,
targetCgName,
targetVolumeName,
extraSpecs)
            # The relationship name must be less than 5 characters.
relationName = self.utils.truncate_string(cgsnapshot['id'], 5)
if extraSpecs[ISV3]:
self.provisionv3.create_group_replica(
self.conn, replicationService, cgInstanceName,
targetCgInstanceName, relationName, extraSpecs)
else:
self.provision.create_group_replica(
self.conn, replicationService, cgInstanceName,
targetCgInstanceName, relationName, extraSpecs)
# Break the replica group relationship.
rgSyncInstanceName = self.utils.find_group_sync_rg_by_target(
self.conn, storageSystem, targetCgInstanceName, extraSpecs,
True)
if rgSyncInstanceName is not None:
repservice = self.utils.find_replication_service(
self.conn, storageSystem)
if repservice is None:
exception_message = (_(
"Cannot find Replication service on system %s.") %
storageSystem)
raise exception.VolumeBackendAPIException(
data=exception_message)
if extraSpecs[ISV3]:
                    # Operation 9: dissolve for snapVx.
operation = self.utils.get_num(9, '16')
self.provisionv3.break_replication_relationship(
self.conn, repservice, rgSyncInstanceName, operation,
extraSpecs)
else:
self.provision.delete_clone_relationship(self.conn, repservice,
rgSyncInstanceName,
extraSpecs)
except Exception:
exceptionMessage = (_("Failed to create snapshot for cg:"
" %(cgName)s.")
% {'cgName': cgName})
LOG.exception(exceptionMessage)
raise exception.VolumeBackendAPIException(data=exceptionMessage)
for snapshot in snapshots:
snapshots_model_update.append(
{'id': snapshot['id'],
'status': fields.SnapshotStatus.AVAILABLE})
modelUpdate = {'status': fields.ConsistencyGroupStatus.AVAILABLE}
return modelUpdate, snapshots_model_update
def delete_cgsnapshot(self, context, cgsnapshot, snapshots):
"""Delete a cgsnapshot.
:param context: the context
        :param cgsnapshot: the consistency group snapshot to be deleted
:param snapshots: snapshots
:returns: dict -- modelUpdate
:returns: list -- list of snapshots
:raises: VolumeBackendAPIException
"""
consistencyGroup = cgsnapshot.get('consistencygroup')
model_update = {}
snapshots_model_update = []
LOG.info(_LI(
"Delete snapshot for source CG %(cgId)s "
"cgsnapshotID: %(cgsnapshot)s."),
{'cgsnapshot': cgsnapshot['id'],
'cgId': cgsnapshot['consistencygroup_id']})
model_update['status'] = cgsnapshot['status']
volumeTypeId = consistencyGroup['volume_type_id'].replace(",", "")
extraSpecs = self._initial_setup(None, volumeTypeId)
self.conn = self._get_ecom_connection()
_poolInstanceName, storageSystem = (
self._get_pool_and_storage_system(extraSpecs))
try:
targetCgName = self.utils.truncate_string(cgsnapshot['id'], 8)
model_update, snapshots = self._delete_cg_and_members(
storageSystem, targetCgName, model_update,
snapshots, extraSpecs)
for snapshot in snapshots:
snapshots_model_update.append(
{'id': snapshot['id'],
'status': fields.SnapshotStatus.DELETED})
except Exception:
exceptionMessage = (_("Failed to delete snapshot for cg: "
"%(cgId)s.")
% {'cgId': cgsnapshot['consistencygroup_id']})
LOG.exception(exceptionMessage)
raise exception.VolumeBackendAPIException(data=exceptionMessage)
return model_update, snapshots_model_update
def _find_consistency_group(self, replicationService, cgName):
"""Finds a CG given its name.
:param replicationService: the replication service
:param cgName: the consistency group name
:returns: foundCgInstanceName
"""
foundCgInstanceName = None
cgInstanceNames = (
self.conn.AssociatorNames(replicationService,
ResultClass='CIM_ReplicationGroup'))
for cgInstanceName in cgInstanceNames:
instance = self.conn.GetInstance(cgInstanceName, LocalOnly=False)
if cgName == instance['ElementName']:
foundCgInstanceName = cgInstanceName
break
return foundCgInstanceName
def _get_members_of_replication_group(self, cgInstanceName):
"""Get the members of consistency group.
:param cgInstanceName: the CG instance name
:returns: list -- memberInstanceNames
"""
memberInstanceNames = self.conn.AssociatorNames(
cgInstanceName,
AssocClass='CIM_OrderedMemberOfCollection')
return memberInstanceNames
def _create_composite_volume(
self, volume, volumeName, volumeSize, extraSpecs,
memberCount=None):
"""Create a composite volume (V2).
:param volume: the volume object
:param volumeName: the name of the volume
:param volumeSize: the size of the volume
:param extraSpecs: extra specifications
:param memberCount: the number of meta members in a composite volume
:returns: int -- return code
:returns: dict -- volumeDict
:returns: string -- storageSystemName
:raises: VolumeBackendAPIException
"""
if not memberCount:
memberCount, errorDesc = self.utils.determine_member_count(
volume['size'], extraSpecs[MEMBERCOUNT],
extraSpecs[COMPOSITETYPE])
if errorDesc is not None:
exceptionMessage = (_("The striped meta count of "
"%(memberCount)s is too small for "
"volume: %(volumeName)s, "
"with size %(volumeSize)s.")
% {'memberCount': memberCount,
'volumeName': volumeName,
'volumeSize': volume['size']})
LOG.error(exceptionMessage)
raise exception.VolumeBackendAPIException(
data=exceptionMessage)
poolInstanceName, storageSystemName = (
self._get_pool_and_storage_system(extraSpecs))
LOG.debug("Create Volume: %(volume)s Pool: %(pool)s "
"Storage System: %(storageSystem)s "
"Size: %(size)lu MemberCount: %(memberCount)s.",
{'volume': volumeName,
'pool': poolInstanceName,
'storageSystem': storageSystemName,
'size': volumeSize,
'memberCount': memberCount})
elementCompositionService = (
self.utils.find_element_composition_service(self.conn,
storageSystemName))
storageConfigService = self.utils.find_storage_configuration_service(
self.conn, storageSystemName)
# If FAST is intended to be used we must first check that the pool
# is associated with the correct storage tier.
if extraSpecs[FASTPOLICY] is not None:
foundPoolInstanceName = self.fast.get_pool_associated_to_policy(
self.conn, extraSpecs[FASTPOLICY], extraSpecs[ARRAY],
storageConfigService, poolInstanceName)
if foundPoolInstanceName is None:
exceptionMessage = (_("Pool: %(poolName)s. "
"is not associated to storage tier for "
"fast policy %(fastPolicy)s.")
% {'poolName': extraSpecs[POOL],
'fastPolicy':
extraSpecs[FASTPOLICY]})
LOG.error(exceptionMessage)
raise exception.VolumeBackendAPIException(
data=exceptionMessage)
compositeType = self.utils.get_composite_type(
extraSpecs[COMPOSITETYPE])
volumeDict, rc = self.provision.create_composite_volume(
self.conn, elementCompositionService, volumeSize, volumeName,
poolInstanceName, compositeType, memberCount, extraSpecs)
        # Now that we have checked that the pool is associated with the
        # correct storage tier and the volume was successfully created,
        # add the volume to the default storage group created for
        # volumes in pools associated with this FAST policy.
if extraSpecs[FASTPOLICY]:
LOG.info(_LI(
"Adding volume: %(volumeName)s to default storage group"
" for FAST policy: %(fastPolicyName)s."),
{'volumeName': volumeName,
'fastPolicyName': extraSpecs[FASTPOLICY]})
defaultStorageGroupInstanceName = (
self._get_or_create_default_storage_group(
self.conn, storageSystemName, volumeDict,
volumeName, extraSpecs[FASTPOLICY], extraSpecs))
if not defaultStorageGroupInstanceName:
exceptionMessage = (_(
"Unable to create or get default storage group for "
"FAST policy: %(fastPolicyName)s.")
% {'fastPolicyName': extraSpecs[FASTPOLICY]})
LOG.error(exceptionMessage)
raise exception.VolumeBackendAPIException(
data=exceptionMessage)
# If qos exists, update storage group to reflect qos parameters
if 'qos' in extraSpecs:
self.utils.update_storagegroup_qos(
self.conn, defaultStorageGroupInstanceName, extraSpecs)
self._add_volume_to_default_storage_group_on_create(
volumeDict, volumeName, storageConfigService,
storageSystemName, extraSpecs[FASTPOLICY], extraSpecs)
return rc, volumeDict, storageSystemName
def _create_v3_volume(
self, volume, volumeName, volumeSize, extraSpecs):
"""Create a volume (V3).
:param volume: the volume object
:param volumeName: the volume name
:param volumeSize: the volume size
:param extraSpecs: extra specifications
:returns: int -- return code
:returns: dict -- volumeDict
:returns: string -- storageSystemName
:raises: VolumeBackendAPIException
"""
isValidSLO, isValidWorkload = self.utils.verify_slo_workload(
extraSpecs[SLO], extraSpecs[WORKLOAD])
if not isValidSLO or not isValidWorkload:
exceptionMessage = (_(
"Either SLO: %(slo)s or workload %(workload)s is invalid. "
"Examine previous error statement for valid values.")
% {'slo': extraSpecs[SLO],
'workload': extraSpecs[WORKLOAD]})
LOG.error(exceptionMessage)
raise exception.VolumeBackendAPIException(data=exceptionMessage)
poolInstanceName, storageSystemName = (
self._get_pool_and_storage_system(extraSpecs))
# Check to see if SLO and Workload are configured on the array.
storagePoolCapability = self.provisionv3.get_storage_pool_capability(
self.conn, poolInstanceName)
if extraSpecs[SLO]:
if storagePoolCapability:
storagePoolSetting = self.provisionv3.get_storage_pool_setting(
self.conn, storagePoolCapability, extraSpecs[SLO],
extraSpecs[WORKLOAD])
if not storagePoolSetting:
exceptionMessage = (_(
"The array does not support the storage pool setting "
"for SLO %(slo)s or workload %(workload)s. Please "
"check the array for valid SLOs and workloads.")
% {'slo': extraSpecs[SLO],
'workload': extraSpecs[WORKLOAD]})
LOG.error(exceptionMessage)
raise exception.VolumeBackendAPIException(
data=exceptionMessage)
else:
exceptionMessage = (_(
"Cannot determine storage pool settings."))
LOG.error(exceptionMessage)
raise exception.VolumeBackendAPIException(
data=exceptionMessage)
LOG.debug("Create Volume: %(volume)s Pool: %(pool)s "
"Storage System: %(storageSystem)s "
"Size: %(size)lu.",
{'volume': volumeName,
'pool': poolInstanceName,
'storageSystem': storageSystemName,
'size': volumeSize})
storageConfigService = self.utils.find_storage_configuration_service(
self.conn, storageSystemName)
# A volume created without specifying a storage group during
# creation time is allocated from the default SRP pool and
# assigned the optimized SLO.
sgInstanceName = self._get_or_create_storage_group_v3(
extraSpecs[POOL], extraSpecs[SLO],
extraSpecs[WORKLOAD], storageSystemName, extraSpecs)
volumeDict, rc = self.provisionv3.create_volume_from_sg(
self.conn, storageConfigService, volumeName,
sgInstanceName, volumeSize, extraSpecs)
return rc, volumeDict, storageSystemName
def _get_or_create_storage_group_v3(
self, poolName, slo, workload, storageSystemName, extraSpecs):
"""Get or create storage group_v3 (V3).
        :param poolName: the SRP pool name
:param slo: the SLO
:param workload: the workload
:param storageSystemName: storage system name
:param extraSpecs: extra specifications
:returns: sgInstanceName
"""
storageGroupName, controllerConfigService, sgInstanceName = (
self.utils.get_v3_default_sg_instance_name(
self.conn, poolName, slo, workload, storageSystemName))
if sgInstanceName is None:
sgInstanceName = self.provisionv3.create_storage_group_v3(
self.conn, controllerConfigService, storageGroupName,
poolName, slo, workload, extraSpecs)
# If qos exists, update storage group to reflect qos parameters
if 'qos' in extraSpecs:
self.utils.update_storagegroup_qos(
self.conn, sgInstanceName, extraSpecs)
return sgInstanceName
def _extend_composite_volume(self, volumeInstance, volumeName,
newSize, additionalVolumeSize, extraSpecs):
"""Extend a composite volume (V2).
:param volumeInstance: the volume instance
:param volumeName: the name of the volume
:param newSize: in GBs
:param additionalVolumeSize: additional volume size
:param extraSpecs: extra specifications
:returns: int -- return code
:returns: dict -- modifiedVolumeDict
:raises: VolumeBackendAPIException
"""
        # Check whether the volume is extendable; only concatenated
        # volumes can be extended.
isConcatenated = self.utils.check_if_volume_is_extendable(
self.conn, volumeInstance)
if 'True' not in isConcatenated:
exceptionMessage = (_(
"Volume: %(volumeName)s is not a concatenated volume. "
"You can only perform extend on concatenated volume. "
"Exiting...")
% {'volumeName': volumeName})
LOG.error(exceptionMessage)
raise exception.VolumeBackendAPIException(data=exceptionMessage)
else:
compositeType = self.utils.get_composite_type(CONCATENATED)
LOG.debug("Extend Volume: %(volume)s New size: %(newSize)s GBs.",
{'volume': volumeName,
'newSize': newSize})
deviceId = volumeInstance['DeviceID']
storageSystemName = volumeInstance['SystemName']
LOG.debug(
"Device ID: %(deviceid)s: Storage System: "
"%(storagesystem)s.",
{'deviceid': deviceId,
'storagesystem': storageSystemName})
storageConfigService = self.utils.find_storage_configuration_service(
self.conn, storageSystemName)
elementCompositionService = (
self.utils.find_element_composition_service(
self.conn, storageSystemName))
        # Create an unbound volume with a size of
        # additionalVolumeSize (newSize - oldSize).
unboundVolumeInstance = self._create_and_get_unbound_volume(
self.conn, storageConfigService, volumeInstance.path,
additionalVolumeSize, extraSpecs)
if unboundVolumeInstance is None:
exceptionMessage = (_(
"Error Creating unbound volume on an Extend operation."))
LOG.error(exceptionMessage)
raise exception.VolumeBackendAPIException(data=exceptionMessage)
# Add the new unbound volume to the original composite volume.
rc, modifiedVolumeDict = (
self._modify_and_get_composite_volume_instance(
self.conn, elementCompositionService, volumeInstance,
unboundVolumeInstance.path, volumeName, compositeType,
extraSpecs))
if modifiedVolumeDict is None:
exceptionMessage = (_(
"On an Extend Operation, error adding volume to composite "
"volume: %(volumename)s.")
% {'volumename': volumeName})
LOG.error(exceptionMessage)
raise exception.VolumeBackendAPIException(data=exceptionMessage)
return rc, modifiedVolumeDict
def _slo_workload_migration(self, volumeInstance, volume, host,
volumeName, volumeStatus, newType,
extraSpecs):
"""Migrate from SLO/Workload combination to another (V3).
:param volumeInstance: the volume instance
:param volume: the volume object
:param host: the host object
:param volumeName: the name of the volume
:param volumeStatus: the volume status
:param newType: the type to migrate to
:param extraSpecs: extra specifications
:returns: boolean -- True if migration succeeded, False if error.
"""
storageGroupName = self.utils.get_v3_storage_group_name(
extraSpecs[POOL], extraSpecs[SLO], extraSpecs[WORKLOAD])
volumeInstanceName = volumeInstance.path
isValid, targetSlo, targetWorkload = (
self._is_valid_for_storage_assisted_migration_v3(
volumeInstanceName, host, extraSpecs[ARRAY],
extraSpecs[POOL], volumeName, volumeStatus,
storageGroupName))
storageSystemName = volumeInstance['SystemName']
if not isValid:
LOG.error(_LE(
"Volume %(name)s is not suitable for storage "
"assisted migration using retype."),
{'name': volumeName})
return False
if volume['host'] != host['host']:
LOG.debug(
"Retype Volume %(name)s from source host %(sourceHost)s "
"to target host %(targetHost)s.",
{'name': volumeName,
'sourceHost': volume['host'],
'targetHost': host['host']})
return self._migrate_volume_v3(
volume, volumeInstance, extraSpecs[POOL], targetSlo,
targetWorkload, storageSystemName, newType, extraSpecs)
return False
def _migrate_volume_v3(
self, volume, volumeInstance, poolName, targetSlo,
targetWorkload, storageSystemName, newType, extraSpecs):
"""Migrate from one slo/workload combination to another (V3).
This requires moving the volume from its current SG to a
new or existing SG that has the target attributes.
:param volume: the volume object
:param volumeInstance: the volume instance
:param poolName: the SRP Pool Name
:param targetSlo: the target SLO
:param targetWorkload: the target workload
:param storageSystemName: the storage system name
:param newType: the type to migrate to
:param extraSpecs: extra specifications
:returns: boolean -- True if migration succeeded, False if error.
"""
volumeName = volume['name']
controllerConfigService = (
self.utils.find_controller_configuration_service(
self.conn, storageSystemName))
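        # Move the volume out of its current (default) storage group and
        # into the group matching the target SLO/workload, verifying the
        # removal and the addition before reporting success.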
defaultSgName = self.utils.get_v3_storage_group_name(
extraSpecs[POOL], extraSpecs[SLO], extraSpecs[WORKLOAD])
foundStorageGroupInstanceName = (
self.utils.get_storage_group_from_volume(
self.conn, volumeInstance.path, defaultSgName))
if foundStorageGroupInstanceName is None:
LOG.warning(_LW(
"Volume : %(volumeName)s is not currently "
"belonging to any storage group."),
{'volumeName': volumeName})
else:
self.provision.remove_device_from_storage_group(
self.conn,
controllerConfigService,
foundStorageGroupInstanceName,
volumeInstance.path,
volumeName, extraSpecs)
# Check that it has been removed.
sgFromVolRemovedInstanceName = (
self.utils.wrap_get_storage_group_from_volume(
self.conn, volumeInstance.path, defaultSgName))
if sgFromVolRemovedInstanceName is not None:
LOG.error(_LE(
"Volume : %(volumeName)s has not been "
"removed from source storage group %(storageGroup)s."),
{'volumeName': volumeName,
'storageGroup': sgFromVolRemovedInstanceName})
return False
storageGroupName = self.utils.get_v3_storage_group_name(
poolName, targetSlo, targetWorkload)
targetSgInstanceName = self._get_or_create_storage_group_v3(
poolName, targetSlo, targetWorkload, storageSystemName,
extraSpecs)
if targetSgInstanceName is None:
LOG.error(_LE(
"Failed to get or create storage group %(storageGroupName)s."),
{'storageGroupName': storageGroupName})
return False
self.masking.add_volume_to_storage_group(
self.conn, controllerConfigService, targetSgInstanceName,
volumeInstance, volumeName, storageGroupName, extraSpecs)
# Check that it has been added.
sgFromVolAddedInstanceName = (
self.utils.get_storage_group_from_volume(
self.conn, volumeInstance.path, storageGroupName))
if sgFromVolAddedInstanceName is None:
LOG.error(_LE(
"Volume : %(volumeName)s has not been "
"added to target storage group %(storageGroup)s."),
{'volumeName': volumeName,
'storageGroup': targetSgInstanceName})
return False
return True
def _pool_migration(self, volumeInstance, volume, host,
volumeName, volumeStatus,
fastPolicyName, newType, extraSpecs):
"""Migrate from one pool to another (V2).
:param volumeInstance: the volume instance
:param volume: the volume object
:param host: the host object
:param volumeName: the name of the volume
:param volumeStatus: the volume status
:param fastPolicyName: the FAST policy Name
:param newType: the type to migrate to
:param extraSpecs: extra specifications
:returns: boolean -- True if migration succeeded, False if error.
"""
storageSystemName = volumeInstance['SystemName']
isValid, targetPoolName, targetFastPolicyName = (
self._is_valid_for_storage_assisted_migration(
volumeInstance.path, host, storageSystemName,
volumeName, volumeStatus))
if not isValid:
LOG.error(_LE(
"Volume %(name)s is not suitable for storage "
"assisted migration using retype."),
{'name': volumeName})
return False
if volume['host'] != host['host']:
LOG.debug(
"Retype Volume %(name)s from source host %(sourceHost)s "
"to target host %(targetHost)s.",
{'name': volumeName,
'sourceHost': volume['host'],
'targetHost': host['host']})
return self._migrate_volume(
volume, volumeInstance, targetPoolName, targetFastPolicyName,
fastPolicyName, extraSpecs, newType)
return False
def _update_pool_stats(
self, backendName, arrayInfo):
"""Update pool statistics (V2).
:param backendName: the backend name
:param arrayInfo: the arrayInfo
:returns: location_info, total_capacity_gb, free_capacity_gb,
provisioned_capacity_gb
"""
if arrayInfo['FastPolicy']:
LOG.debug(
"Fast policy %(fastPolicyName)s is enabled on %(arrayName)s.",
{'fastPolicyName': arrayInfo['FastPolicy'],
'arrayName': arrayInfo['SerialNumber']})
else:
LOG.debug(
"No Fast policy for Array:%(arrayName)s "
"backend:%(backendName)s.",
{'arrayName': arrayInfo['SerialNumber'],
'backendName': backendName})
storageSystemInstanceName = self.utils.find_storageSystem(
self.conn, arrayInfo['SerialNumber'])
isTieringPolicySupported = (
self.fast.is_tiering_policy_enabled_on_storage_system(
self.conn, storageSystemInstanceName))
if (arrayInfo['FastPolicy'] is not None and
isTieringPolicySupported is True): # FAST enabled
(total_capacity_gb, free_capacity_gb, provisioned_capacity_gb,
array_max_over_subscription) = (
self.fast.get_capacities_associated_to_policy(
self.conn, arrayInfo['SerialNumber'],
arrayInfo['FastPolicy']))
LOG.info(_LI(
"FAST: capacity stats for policy %(fastPolicyName)s on array "
"%(arrayName)s. total_capacity_gb=%(total_capacity_gb)lu, "
"free_capacity_gb=%(free_capacity_gb)lu."),
{'fastPolicyName': arrayInfo['FastPolicy'],
'arrayName': arrayInfo['SerialNumber'],
'total_capacity_gb': total_capacity_gb,
'free_capacity_gb': free_capacity_gb})
else: # NON-FAST
(total_capacity_gb, free_capacity_gb, provisioned_capacity_gb,
array_max_over_subscription) = (
self.utils.get_pool_capacities(self.conn,
arrayInfo['PoolName'],
arrayInfo['SerialNumber']))
LOG.info(_LI(
"NON-FAST: capacity stats for pool %(poolName)s on array "
"%(arrayName)s total_capacity_gb=%(total_capacity_gb)lu, "
"free_capacity_gb=%(free_capacity_gb)lu."),
{'poolName': arrayInfo['PoolName'],
'arrayName': arrayInfo['SerialNumber'],
'total_capacity_gb': total_capacity_gb,
'free_capacity_gb': free_capacity_gb})
location_info = ("%(arrayName)s#%(poolName)s#%(policyName)s"
% {'arrayName': arrayInfo['SerialNumber'],
'poolName': arrayInfo['PoolName'],
'policyName': arrayInfo['FastPolicy']})
return (location_info, total_capacity_gb, free_capacity_gb,
provisioned_capacity_gb, array_max_over_subscription)
def _set_v2_extra_specs(self, extraSpecs, poolRecord):
"""Set the VMAX V2 extra specs.
:param extraSpecs: extra specifications
:param poolRecord: pool record
:returns: dict -- the extraSpecs
:raises: VolumeBackendAPIException
"""
try:
stripedMetaCount = extraSpecs[STRIPECOUNT]
extraSpecs[MEMBERCOUNT] = stripedMetaCount
extraSpecs[COMPOSITETYPE] = STRIPED
LOG.debug(
"There are: %(stripedMetaCount)s striped metas in "
"the extra specs.",
{'stripedMetaCount': stripedMetaCount})
except KeyError:
memberCount = '1'
extraSpecs[MEMBERCOUNT] = memberCount
extraSpecs[COMPOSITETYPE] = CONCATENATED
LOG.debug("StripedMetaCount is not in the extra specs.")
# Get the FAST policy from the file. This value can be None if the
# user doesn't want to associate with any FAST policy.
if poolRecord['FastPolicy']:
LOG.debug("The fast policy name is: %(fastPolicyName)s.",
{'fastPolicyName': poolRecord['FastPolicy']})
extraSpecs[FASTPOLICY] = poolRecord['FastPolicy']
extraSpecs[ISV3] = False
extraSpecs = self._set_common_extraSpecs(extraSpecs, poolRecord)
LOG.debug("Pool is: %(pool)s "
"Array is: %(array)s "
"FastPolicy is: %(fastPolicy)s "
"CompositeType is: %(compositeType)s "
"MemberCount is: %(memberCount)s.",
{'pool': extraSpecs[POOL],
'array': extraSpecs[ARRAY],
'fastPolicy': extraSpecs[FASTPOLICY],
'compositeType': extraSpecs[COMPOSITETYPE],
'memberCount': extraSpecs[MEMBERCOUNT]})
return extraSpecs
def _set_v3_extra_specs(self, extraSpecs, poolRecord):
"""Set the VMAX V3 extra specs.
If SLO or workload are not specified then the default
values are NONE and the Optimized SLO will be assigned to the
volume.
:param extraSpecs: extra specifications
:param poolRecord: pool record
:returns: dict -- the extra specifications dictionary
"""
extraSpecs[SLO] = poolRecord['SLO']
extraSpecs[WORKLOAD] = poolRecord['Workload']
extraSpecs[ISV3] = True
extraSpecs = self._set_common_extraSpecs(extraSpecs, poolRecord)
LOG.debug("Pool is: %(pool)s "
"Array is: %(array)s "
"SLO is: %(slo)s "
"Workload is: %(workload)s.",
{'pool': extraSpecs[POOL],
'array': extraSpecs[ARRAY],
'slo': extraSpecs[SLO],
'workload': extraSpecs[WORKLOAD]})
return extraSpecs
def _set_common_extraSpecs(self, extraSpecs, poolRecord):
"""Set common extra specs.
The extraSpecs are common to v2 and v3
:param extraSpecs: extra specifications
:param poolRecord: pool record
:returns: dict -- the extra specifications dictionary
"""
extraSpecs[POOL] = poolRecord['PoolName']
extraSpecs[ARRAY] = poolRecord['SerialNumber']
extraSpecs[PORTGROUPNAME] = poolRecord['PortGroup']
if 'Interval' in poolRecord and poolRecord['Interval']:
extraSpecs[INTERVAL] = poolRecord['Interval']
LOG.debug("The user defined interval is : %(intervalInSecs)s.",
{'intervalInSecs': poolRecord['Interval']})
else:
LOG.debug("Interval not overridden, default of 10 assumed.")
if 'Retries' in poolRecord and poolRecord['Retries']:
extraSpecs[RETRIES] = poolRecord['Retries']
LOG.debug("The user defined retries is : %(retries)s.",
{'retries': poolRecord['Retries']})
else:
LOG.debug("Retries not overridden, default of 60 assumed.")
return extraSpecs
def _delete_from_pool(self, storageConfigService, volumeInstance,
volumeName, deviceId, fastPolicyName, extraSpecs):
"""Delete from pool (v2).
:param storageConfigService: the storage config service
:param volumeInstance: the volume instance
:param volumeName: the volume Name
:param deviceId: the device ID of the volume
:param fastPolicyName: the FAST policy name(if it exists)
:param extraSpecs: extra specifications
:returns: int -- return code
:raises: VolumeBackendAPIException
"""
storageSystemName = volumeInstance['SystemName']
controllerConfigurationService = (
self.utils.find_controller_configuration_service(
self.conn, storageSystemName))
if fastPolicyName is not None:
defaultStorageGroupInstanceName = (
self.masking.remove_device_from_default_storage_group(
self.conn, controllerConfigurationService,
volumeInstance.path, volumeName, fastPolicyName,
extraSpecs))
if defaultStorageGroupInstanceName is None:
LOG.warning(_LW(
"The volume: %(volumename)s. was not first part of the "
"default storage group for FAST policy %(fastPolicyName)s"
"."),
{'volumename': volumeName,
'fastPolicyName': fastPolicyName})
# Check if it is part of another storage group.
self._remove_device_from_storage_group(
controllerConfigurationService,
volumeInstance.path, volumeName, extraSpecs)
else:
# Check if volume is part of a storage group.
self._remove_device_from_storage_group(
controllerConfigurationService,
volumeInstance.path, volumeName, extraSpecs)
LOG.debug("Delete Volume: %(name)s Method: EMCReturnToStoragePool "
"ConfigService: %(service)s TheElement: %(vol_instance)s "
"DeviceId: %(deviceId)s.",
{'service': storageConfigService,
'name': volumeName,
'vol_instance': volumeInstance.path,
'deviceId': deviceId})
try:
rc = self.provision.delete_volume_from_pool(
self.conn, storageConfigService, volumeInstance.path,
volumeName, extraSpecs)
except Exception:
# If we cannot successfully delete the volume then we want to
# return the volume to the default storage group.
if (fastPolicyName is not None and
defaultStorageGroupInstanceName is not None and
storageSystemName is not None):
assocDefaultStorageGroupName = (
self.fast
.add_volume_to_default_storage_group_for_fast_policy(
self.conn, controllerConfigurationService,
volumeInstance, volumeName, fastPolicyName,
extraSpecs))
if assocDefaultStorageGroupName is None:
LOG.error(_LE(
"Failed to Roll back to re-add volume %(volumeName)s "
"to default storage group for fast policy "
"%(fastPolicyName)s. Please contact your sysadmin to "
"get the volume returned to the default "
"storage group."),
{'volumeName': volumeName,
'fastPolicyName': fastPolicyName})
errorMessage = (_("Failed to delete volume %(volumeName)s.") %
{'volumeName': volumeName})
LOG.exception(errorMessage)
raise exception.VolumeBackendAPIException(data=errorMessage)
return rc
def _delete_from_pool_v3(self, storageConfigService, volumeInstance,
volumeName, deviceId, extraSpecs):
"""Delete from pool (v3).
:param storageConfigService: the storage config service
:param volumeInstance: the volume instance
:param volumeName: the volume Name
:param deviceId: the device ID of the volume
:param extraSpecs: extra specifications
:returns: int -- return code
:raises: VolumeBackendAPIException
"""
storageSystemName = volumeInstance['SystemName']
controllerConfigurationService = (
self.utils.find_controller_configuration_service(
self.conn, storageSystemName))
        # Check if it is part of a storage group and remove it; extra
        # logic handles the case where the volume is the last member.
self.masking.remove_and_reset_members(
self.conn, controllerConfigurationService, volumeInstance,
volumeName, extraSpecs, None, False)
LOG.debug("Delete Volume: %(name)s Method: EMCReturnToStoragePool "
"ConfigServic: %(service)s TheElement: %(vol_instance)s "
"DeviceId: %(deviceId)s.",
{'service': storageConfigService,
'name': volumeName,
'vol_instance': volumeInstance.path,
'deviceId': deviceId})
try:
rc = self.provisionv3.delete_volume_from_pool(
self.conn, storageConfigService, volumeInstance.path,
volumeName, extraSpecs)
except Exception:
# If we cannot successfully delete the volume, then we want to
# return the volume to the default storage group,
# which should be the SG it previously belonged to.
self.masking.return_volume_to_default_storage_group_v3(
self.conn, controllerConfigurationService,
volumeInstance, volumeName, extraSpecs)
errorMessage = (_("Failed to delete volume %(volumeName)s.") %
{'volumeName': volumeName})
LOG.exception(errorMessage)
raise exception.VolumeBackendAPIException(data=errorMessage)
return rc
def _create_clone_v2(self, repServiceInstanceName, cloneVolume,
sourceVolume, sourceInstance, isSnapshot,
extraSpecs):
"""Create a clone (v2).
:param repServiceInstanceName: the replication service
:param cloneVolume: the clone volume object
:param sourceVolume: the source volume object
:param sourceInstance: the device ID of the volume
:param isSnapshot: check to see if it is a snapshot
:param extraSpecs: extra specifications
:returns: int -- return code
:raises: VolumeBackendAPIException
"""
# Check if the source volume contains any meta devices.
metaHeadInstanceName = self.utils.get_volume_meta_head(
self.conn, sourceInstance.path)
if metaHeadInstanceName is None: # Simple volume.
return self._create_v2_replica_and_delete_clone_relationship(
repServiceInstanceName, cloneVolume, sourceVolume,
sourceInstance, None, extraSpecs, isSnapshot)
else: # Composite volume with meta device members.
            # Check the capacities of the meta members.
metaMemberInstanceNames = (
self.utils.get_composite_elements(
self.conn, sourceInstance))
volumeCapacities = self.utils.get_meta_members_capacity_in_byte(
self.conn, metaMemberInstanceNames)
LOG.debug("Volume capacities: %(metasizes)s.",
{'metasizes': volumeCapacities})
if len(set(volumeCapacities)) == 1:
LOG.debug("Meta volume all of the same size.")
return self._create_v2_replica_and_delete_clone_relationship(
repServiceInstanceName, cloneVolume, sourceVolume,
sourceInstance, None, extraSpecs, isSnapshot)
LOG.debug("Meta volumes are of different sizes, "
"%d different sizes.", len(set(volumeCapacities)))
baseTargetVolumeInstance = None
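            # Build the target as a composite volume: the first capacity
            # creates the base volume, and each subsequent capacity is
            # created unbound and appended to that base.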
for volumeSizeInbits in volumeCapacities:
if baseTargetVolumeInstance is None: # Create base volume.
baseVolumeName = "TargetBaseVol"
volume = {'size': int(self.utils.convert_bits_to_gbs(
volumeSizeInbits))}
_rc, baseVolumeDict, storageSystemName = (
self._create_composite_volume(
volume, baseVolumeName, volumeSizeInbits,
extraSpecs, 1))
baseTargetVolumeInstance = self.utils.find_volume_instance(
self.conn, baseVolumeDict, baseVolumeName)
LOG.debug("Base target volume %(targetVol)s created. "
"capacity in bits: %(capInBits)lu.",
{'capInBits': volumeSizeInbits,
'targetVol': baseTargetVolumeInstance.path})
else: # Create append volume
targetVolumeName = "MetaVol"
volume = {'size': int(self.utils.convert_bits_to_gbs(
volumeSizeInbits))}
storageConfigService = (
self.utils.find_storage_configuration_service(
self.conn, storageSystemName))
unboundVolumeInstance = (
self._create_and_get_unbound_volume(
self.conn, storageConfigService,
baseTargetVolumeInstance.path, volumeSizeInbits,
extraSpecs))
if unboundVolumeInstance is None:
exceptionMessage = (_(
"Error Creating unbound volume."))
LOG.error(exceptionMessage)
# Remove target volume
self._delete_target_volume_v2(storageConfigService,
baseTargetVolumeInstance,
extraSpecs)
raise exception.VolumeBackendAPIException(
data=exceptionMessage)
# Append the new unbound volume to the
# base target composite volume.
baseTargetVolumeInstance = self.utils.find_volume_instance(
self.conn, baseVolumeDict, baseVolumeName)
try:
elementCompositionService = (
self.utils.find_element_composition_service(
self.conn, storageSystemName))
compositeType = self.utils.get_composite_type(
extraSpecs[COMPOSITETYPE])
_rc, modifiedVolumeDict = (
self._modify_and_get_composite_volume_instance(
self.conn,
elementCompositionService,
baseTargetVolumeInstance,
unboundVolumeInstance.path,
targetVolumeName,
compositeType,
extraSpecs))
if modifiedVolumeDict is None:
exceptionMessage = (_(
"Error appending volume %(volumename)s to "
"target base volume.")
% {'volumename': targetVolumeName})
LOG.error(exceptionMessage)
raise exception.VolumeBackendAPIException(
data=exceptionMessage)
except Exception:
exceptionMessage = (_(
"Exception appending meta volume to target volume "
"%(volumename)s.")
% {'volumename': baseVolumeName})
LOG.error(exceptionMessage)
# Remove append volume and target base volume
self._delete_target_volume_v2(
storageConfigService, unboundVolumeInstance,
extraSpecs)
self._delete_target_volume_v2(
storageConfigService, baseTargetVolumeInstance,
extraSpecs)
raise exception.VolumeBackendAPIException(
data=exceptionMessage)
LOG.debug("Create V2 replica for meta members of different sizes.")
return self._create_v2_replica_and_delete_clone_relationship(
repServiceInstanceName, cloneVolume, sourceVolume,
sourceInstance, baseTargetVolumeInstance, extraSpecs,
isSnapshot)
def _create_v2_replica_and_delete_clone_relationship(
self, repServiceInstanceName, cloneVolume, sourceVolume,
sourceInstance, targetInstance, extraSpecs, isSnapshot=False):
"""Create a replica and delete the clone relationship.
:param repServiceInstanceName: the replication service
:param cloneVolume: the clone volume object
:param sourceVolume: the source volume object
:param sourceInstance: the source volume instance
:param targetInstance: the target volume instance
:param extraSpecs: extra specifications
:param isSnapshot: check to see if it is a snapshot
:returns: int -- return code
:returns: dict -- cloneDict
"""
sourceName = sourceVolume['name']
cloneName = cloneVolume['name']
try:
rc, job = self.provision.create_element_replica(
self.conn, repServiceInstanceName, cloneName, sourceName,
sourceInstance, targetInstance, extraSpecs)
except Exception:
exceptionMessage = (_(
"Exception during create element replica. "
"Clone name: %(cloneName)s "
"Source name: %(sourceName)s "
"Extra specs: %(extraSpecs)s ")
% {'cloneName': cloneName,
'sourceName': sourceName,
'extraSpecs': extraSpecs})
LOG.error(exceptionMessage)
if targetInstance is not None:
# Check if the copy session exists.
storageSystem = targetInstance['SystemName']
                syncInstanceName = self.utils.find_sync_sv_by_target(
                    self.conn, storageSystem, targetInstance, extraSpecs,
                    False)
if syncInstanceName is not None:
# Remove the Clone relationship.
rc, job = self.provision.delete_clone_relationship(
self.conn, repServiceInstanceName, syncInstanceName,
extraSpecs, True)
storageConfigService = (
self.utils.find_storage_configuration_service(
self.conn, storageSystem))
self._delete_target_volume_v2(
storageConfigService, targetInstance, extraSpecs)
raise exception.VolumeBackendAPIException(
data=exceptionMessage)
cloneDict = self.provision.get_volume_dict_from_job(
self.conn, job['Job'])
fastPolicyName = extraSpecs[FASTPOLICY]
if isSnapshot:
if fastPolicyName is not None:
storageSystemName = sourceInstance['SystemName']
self._add_clone_to_default_storage_group(
fastPolicyName, storageSystemName, cloneDict, cloneName,
extraSpecs)
LOG.info(_LI("Snapshot creation %(cloneName)s completed. "
"Source Volume: %(sourceName)s."),
{'cloneName': cloneName,
'sourceName': sourceName})
return rc, cloneDict
cloneVolume['provider_location'] = six.text_type(cloneDict)
syncInstanceName, storageSystemName = (
self._find_storage_sync_sv_sv(cloneVolume, sourceVolume,
extraSpecs))
# Remove the Clone relationship so it can be used as a regular lun.
# 8 - Detach operation.
rc, job = self.provision.delete_clone_relationship(
self.conn, repServiceInstanceName, syncInstanceName,
extraSpecs)
if fastPolicyName is not None:
self._add_clone_to_default_storage_group(
fastPolicyName, storageSystemName, cloneDict, cloneName,
extraSpecs)
return rc, cloneDict
def get_target_wwns_from_masking_view(
self, storageSystem, volume, connector):
"""Find target WWNs via the masking view.
:param storageSystem: the storage system name
:param volume: volume to be attached
:param connector: the connector dict
:returns: list -- the target WWN list
"""
targetWwns = []
mvInstanceName = self.get_masking_view_by_volume(volume, connector)
if mvInstanceName is not None:
targetWwns = self.masking.get_target_wwns(
self.conn, mvInstanceName)
LOG.info(_LI("Target wwns in masking view %(maskingView)s: "
"%(targetWwns)s."),
{'maskingView': mvInstanceName,
'targetWwns': six.text_type(targetWwns)})
return targetWwns
def get_port_group_from_masking_view(self, maskingViewInstanceName):
"""Get the port groups in a masking view.
:param maskingViewInstanceName: masking view instance name
:returns: portGroupInstanceName
"""
return self.masking.get_port_group_from_masking_view(
self.conn, maskingViewInstanceName)
def get_initiator_group_from_masking_view(self, maskingViewInstanceName):
"""Get the initiator group in a masking view.
:param maskingViewInstanceName: masking view instance name
:returns: initiatorGroupInstanceName
"""
return self.masking.get_initiator_group_from_masking_view(
self.conn, maskingViewInstanceName)
def get_masking_view_by_volume(self, volume, connector):
"""Given volume, retrieve the masking view instance name.
:param volume: the volume
:param connector: the connector object
:returns: maskingviewInstanceName
"""
LOG.debug("Finding Masking View for volume %(volume)s.",
{'volume': volume})
volumeInstance = self._find_lun(volume)
return self.masking.get_masking_view_by_volume(
self.conn, volumeInstance, connector)
def get_masking_views_by_port_group(self, portGroupInstanceName):
"""Given port group, retrieve the masking view instance name.
:param portGroupInstanceName: port group instance name
:returns: list -- maskingViewInstanceNames
"""
LOG.debug("Finding Masking Views for port group %(pg)s.",
{'pg': portGroupInstanceName})
return self.masking.get_masking_views_by_port_group(
self.conn, portGroupInstanceName)
def get_masking_views_by_initiator_group(
self, initiatorGroupInstanceName):
"""Given initiator group, retrieve the masking view instance name.
:param initiatorGroupInstanceName: initiator group instance name
:returns: list -- maskingViewInstanceNames
"""
LOG.debug("Finding Masking Views for initiator group %(ig)s.",
{'ig': initiatorGroupInstanceName})
return self.masking.get_masking_views_by_initiator_group(
self.conn, initiatorGroupInstanceName)
def _create_replica_v3(
self, repServiceInstanceName, cloneVolume,
sourceVolume, sourceInstance, isSnapshot, extraSpecs):
"""Create a replica.
        V3-specific function: create a replica of the source volume,
        covering both clones and snapshots.
:param repServiceInstanceName: the replication service
:param cloneVolume: the clone volume object
:param sourceVolume: the source volume object
:param sourceInstance: the device ID of the volume
:param isSnapshot: boolean -- check to see if it is a snapshot
:param extraSpecs: extra specifications
:returns: int -- return code
:returns: dict -- cloneDict
"""
cloneName = cloneVolume['name']
        # SyncType 7: snap; the V3 default snapshot is snapVx.
syncType = self.utils.get_num(SNAPVX, '16')
# Operation 9: Dissolve for snapVx.
operation = self.utils.get_num(DISSOLVE_SNAPVX, '16')
rsdInstance = None
targetInstance = None
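        # For snapshots, let the array supply a new SnapVx target via the
        # replication settings data; for full clones, pre-create a
        # duplicate target volume to copy into.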
if isSnapshot:
rsdInstance = self.utils.set_target_element_supplier_in_rsd(
self.conn, repServiceInstanceName, SNAPVX_REPLICATION_TYPE,
CREATE_NEW_TARGET, extraSpecs)
else:
targetInstance = self._create_duplicate_volume(
sourceInstance, cloneName, extraSpecs)
try:
_rc, job = (
self.provisionv3.create_element_replica(
self.conn, repServiceInstanceName, cloneName, syncType,
sourceInstance, extraSpecs, targetInstance, rsdInstance))
except Exception:
LOG.warning(_LW(
"Clone failed on V3. Cleaning up the target volume. "
"Clone name: %(cloneName)s "),
{'cloneName': cloneName})
# Check if the copy session exists.
if targetInstance:
self._cleanup_target(
repServiceInstanceName, targetInstance, extraSpecs)
# Re-throw the exception.
raise
cloneDict = self.provisionv3.get_volume_dict_from_job(
self.conn, job['Job'])
targetVolumeInstance = (
self.provisionv3.get_volume_from_job(self.conn, job['Job']))
LOG.info(_LI("The target instance device id is: %(deviceid)s."),
{'deviceid': targetVolumeInstance['DeviceID']})
cloneVolume['provider_location'] = six.text_type(cloneDict)
syncInstanceName, _storageSystem = (
self._find_storage_sync_sv_sv(cloneVolume, sourceVolume,
extraSpecs, True))
rc, job = self.provisionv3.break_replication_relationship(
self.conn, repServiceInstanceName, syncInstanceName,
operation, extraSpecs)
return rc, cloneDict
def _cleanup_target(
self, repServiceInstanceName, targetInstance, extraSpecs):
"""cleanup target after exception
:param repServiceInstanceName: the replication service
:param targetInstance: the target instance
:param extraSpecs: extra specifications
"""
storageSystem = targetInstance['SystemName']
        syncInstanceName = self.utils.find_sync_sv_by_target(
            self.conn, storageSystem, targetInstance, extraSpecs,
            False)
if syncInstanceName is not None:
# Break the clone relationship.
self.provisionv3.break_replication_relationship(
self.conn, repServiceInstanceName, syncInstanceName,
DISSOLVE_SNAPVX, extraSpecs, True)
storageConfigService = (
self.utils.find_storage_configuration_service(
self.conn, storageSystem))
deviceId = targetInstance['DeviceID']
volumeName = targetInstance['Name']
self._delete_from_pool_v3(
storageConfigService, targetInstance, volumeName,
deviceId, extraSpecs)
def _delete_cg_and_members(
self, storageSystem, cgName, modelUpdate, volumes, extraSpecs):
"""Helper function to delete a consistencygroup and its member volumes.
:param storageSystem: storage system
:param cgName: consistency group name
:param modelUpdate: dict -- the model update dict
:param volumes: the list of member volumes
:param extraSpecs: extra specifications
:returns: dict -- modelUpdate
:returns: list -- the updated list of member volumes
:raises: VolumeBackendAPIException
"""
replicationService = self.utils.find_replication_service(
self.conn, storageSystem)
storageConfigservice = (
self.utils.find_storage_configuration_service(
self.conn, storageSystem))
cgInstanceName = self._find_consistency_group(
replicationService, cgName)
if cgInstanceName is None:
exception_message = (_("Cannot find CG group %s.") % cgName)
raise exception.VolumeBackendAPIException(
data=exception_message)
memberInstanceNames = self._get_members_of_replication_group(
cgInstanceName)
self.provision.delete_consistency_group(
self.conn, replicationService, cgInstanceName, cgName,
extraSpecs)
if memberInstanceNames:
try:
controllerConfigurationService = (
self.utils.find_controller_configuration_service(
self.conn, storageSystem))
for memberInstanceName in memberInstanceNames:
self._remove_device_from_storage_group(
controllerConfigurationService,
memberInstanceName, 'Member Volume', extraSpecs)
LOG.debug("Deleting CG members. CG: %(cg)s "
"%(numVols)lu member volumes: %(memVols)s.",
{'cg': cgInstanceName,
'numVols': len(memberInstanceNames),
'memVols': memberInstanceNames})
if extraSpecs[ISV3]:
self.provisionv3.delete_volume_from_pool(
self.conn, storageConfigservice,
memberInstanceNames, None, extraSpecs)
else:
self.provision.delete_volume_from_pool(
self.conn, storageConfigservice,
memberInstanceNames, None, extraSpecs)
for volumeRef in volumes:
volumeRef['status'] = 'deleted'
except Exception:
for volumeRef in volumes:
volumeRef['status'] = 'error_deleting'
modelUpdate['status'] = 'error_deleting'
return modelUpdate, volumes
def _delete_target_volume_v2(
self, storageConfigService, targetVolumeInstance, extraSpecs):
"""Helper function to delete the clone target volume instance.
:param storageConfigService: storage configuration service instance
:param targetVolumeInstance: clone target volume instance
:param extraSpecs: extra specifications
"""
deviceId = targetVolumeInstance['DeviceID']
volumeName = targetVolumeInstance['Name']
rc = self._delete_from_pool(storageConfigService,
targetVolumeInstance,
volumeName, deviceId,
extraSpecs[FASTPOLICY],
extraSpecs)
return rc
def _validate_pool(self, volume):
"""Get the pool from volume['host'].
        There may be backward compatibility concerns, so we check
        whether a version has been added to provider_location. If it
        has, we know we are at the current version; if not, we assume
        the volume was created before the 'Pool Aware Scheduler' feature.
:param volume: the volume Object
:returns: string -- pool
:raises: VolumeBackendAPIException
"""
pool = None
# Volume is None in CG ops.
if volume is None:
return pool
# This check is for all operations except a create.
        # On a create, provider_location is None.
try:
if volume['provider_location']:
version = self._get_version_from_provider_location(
volume['provider_location'])
if not version:
return pool
except KeyError:
return pool
try:
pool = volume_utils.extract_host(volume['host'], 'pool')
if pool:
LOG.debug("Pool from volume['host'] is %(pool)s.",
{'pool': pool})
else:
exceptionMessage = (_(
"Pool from volume['host'] %(host)s not found.")
% {'host': volume['host']})
raise exception.VolumeBackendAPIException(
data=exceptionMessage)
except Exception as ex:
exceptionMessage = (_(
"Pool from volume['host'] failed with: %(ex)s.")
% {'ex': ex})
raise exception.VolumeBackendAPIException(
data=exceptionMessage)
return pool
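    # Illustrative sketch only (not part of the driver): volume['host'] is
    # expected to carry a '#pool' suffix, which volume_utils.extract_host
    # extracts. The host and pool names below are made-up examples.
    #
    #   >>> volume_utils.extract_host('myhost@EMCBackend#SRP_1', 'pool')
    #   'SRP_1'
    #   >>> volume_utils.extract_host('myhost@EMCBackend', 'pool') is None
    #   True
    #
    # A None pool means volume['host'] pre-dates the pool aware scheduler,
    # and the exception path in _validate_pool above is taken.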
def _get_version_from_provider_location(self, loc):
"""Get the version from the provider location.
        :param loc: the provider_location string
:returns: version or None
"""
version = None
try:
if isinstance(loc, six.string_types):
name = ast.literal_eval(loc)
version = name['version']
except KeyError:
pass
return version
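    # Illustrative sketch only: provider_location is stored as the string
    # form of a dict, so ast.literal_eval is used to recover it. The values
    # below are made-up examples.
    #
    #   loc = ("{'classname': 'Symm_StorageVolume', "
    #          "'keybindings': {'DeviceID': '00123'}, 'version': '2.0.0'}")
    #   self._get_version_from_provider_location(loc)  # -> '2.0.0'
    #
    # Locations written before versioning lack the 'version' key, so the
    # KeyError branch above returns None.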
def manage_existing(self, volume, external_ref):
"""Manages an existing VMAX Volume (import to Cinder).
Renames the existing volume to match the expected name for the volume.
        We also need to consider things like QoS, emulation, account/tenant.
:param volume: the volume object including the volume_type_id
:param external_ref: reference to the existing volume
:returns: dict -- model_update
:raises: VolumeBackendAPIException
"""
extraSpecs = self._initial_setup(volume)
self.conn = self._get_ecom_connection()
arrayName, deviceId = self.utils.get_array_and_device_id(volume,
external_ref)
# Manage existing volume is not supported if fast enabled.
if extraSpecs[FASTPOLICY]:
LOG.warning(_LW(
"FAST is enabled. Policy: %(fastPolicyName)s."),
{'fastPolicyName': extraSpecs[FASTPOLICY]})
exceptionMessage = (_(
"Manage volume is not supported if FAST is enable. "
"FAST policy: %(fastPolicyName)s.")
% {'fastPolicyName': extraSpecs[FASTPOLICY]})
LOG.error(exceptionMessage)
raise exception.VolumeBackendAPIException(
data=exceptionMessage)
# Check if the volume is attached by checking if in any masking view.
volumeInstanceName = (
self.utils.find_volume_by_device_id_on_array(self.conn,
arrayName, deviceId))
sgInstanceNames = (
self.utils.get_storage_groups_from_volume(
self.conn, volumeInstanceName))
for sgInstanceName in sgInstanceNames:
mvInstanceName = self.masking.get_masking_view_from_storage_group(
self.conn, sgInstanceName)
if mvInstanceName:
exceptionMessage = (_(
"Unable to import volume %(deviceId)s to cinder. "
"Volume is in masking view %(mv)s.")
% {'deviceId': deviceId,
'mv': mvInstanceName})
LOG.error(exceptionMessage)
raise exception.VolumeBackendAPIException(
data=exceptionMessage)
        # Check if there are any snapshots associated with the volume.
cinderPoolInstanceName, storageSystemName = (
self._get_pool_and_storage_system(extraSpecs))
repSessionInstanceName = (
self.utils.get_associated_replication_from_source_volume(
self.conn, storageSystemName, deviceId))
if repSessionInstanceName:
exceptionMessage = (_(
"Unable to import volume %(deviceId)s to cinder. "
"It is the source volume of replication session %(sync)s.")
% {'deviceId': deviceId,
'sync': repSessionInstanceName})
LOG.error(exceptionMessage)
raise exception.VolumeBackendAPIException(
data=exceptionMessage)
# Make sure the existing external volume is in the same storage pool.
volumePoolInstanceName = (
self.utils.get_assoc_pool_from_volume(self.conn,
volumeInstanceName))
volumePoolName = volumePoolInstanceName['InstanceID']
cinderPoolName = cinderPoolInstanceName['InstanceID']
LOG.debug("Storage pool of existing volume: %(volPool)s, "
"Storage pool currently managed by cinder: %(cinderPool)s.",
{'volPool': volumePoolName,
'cinderPool': cinderPoolName})
if volumePoolName != cinderPoolName:
exceptionMessage = (_(
"Unable to import volume %(deviceId)s to cinder. The external "
"volume is not in the pool managed by current cinder host.")
% {'deviceId': deviceId})
LOG.error(exceptionMessage)
raise exception.VolumeBackendAPIException(
data=exceptionMessage)
# Rename the volume
volumeId = volume['name']
volumeElementName = self.utils.get_volume_element_name(volumeId)
LOG.debug("Rename volume %(vol)s to %(elementName)s.",
{'vol': volumeInstanceName,
'elementName': volumeElementName})
volumeInstance = self.utils.rename_volume(self.conn,
volumeInstanceName,
volumeElementName)
keys = {}
volpath = volumeInstance.path
keys['CreationClassName'] = volpath['CreationClassName']
keys['SystemName'] = volpath['SystemName']
keys['DeviceID'] = volpath['DeviceID']
keys['SystemCreationClassName'] = volpath['SystemCreationClassName']
model_update = {}
provider_location = {}
provider_location['classname'] = volpath['CreationClassName']
provider_location['keybindings'] = keys
model_update.update({'display_name': volumeElementName})
volume['provider_location'] = six.text_type(provider_location)
model_update.update({'provider_location': volume['provider_location']})
return model_update
def manage_existing_get_size(self, volume, external_ref):
"""Return size of an existing VMAX volume to manage_existing.
:param self: reference to class
:param volume: the volume object including the volume_type_id
:param external_ref: reference to the existing volume
:returns: size of the volume in GB
"""
LOG.debug("Volume in manage_existing_get_size: %(volume)s.",
{'volume': volume})
arrayName, deviceId = self.utils.get_array_and_device_id(volume,
external_ref)
volumeInstanceName = (
self.utils.find_volume_by_device_id_on_array(self.conn,
arrayName, deviceId))
volumeInstance = self.conn.GetInstance(volumeInstanceName)
byteSize = self.utils.get_volume_size(self.conn, volumeInstance)
gbSize = int(math.ceil(float(byteSize) / units.Gi))
LOG.debug(
"Size of volume %(deviceID)s is %(volumeSize)s GB.",
{'deviceID': deviceId,
'volumeSize': gbSize})
return gbSize
def unmanage(self, volume):
"""Export VMAX volume from Cinder.
Leave the volume intact on the backend array.
:param volume: the volume object
:raises: VolumeBackendAPIException
"""
volumeName = volume['name']
volumeId = volume['id']
LOG.debug("Unmanage volume %(name)s, id=%(id)s",
{'name': volumeName,
'id': volumeId})
self._initial_setup(volume)
self.conn = self._get_ecom_connection()
volumeInstance = self._find_lun(volume)
if volumeInstance is None:
exceptionMessage = (_("Cannot find Volume: %(id)s. "
"unmanage operation. Exiting...")
% {'id': volumeId})
LOG.error(exceptionMessage)
raise exception.VolumeBackendAPIException(data=exceptionMessage)
# Rename the volume to volumeId, thus remove the 'OS-' prefix.
volumeInstance = self.utils.rename_volume(self.conn,
volumeInstance,
volumeId)
def update_consistencygroup(self, group, add_volumes,
remove_volumes):
"""Updates LUNs in consistency group.
        :param group: the consistency group object to be updated
        :param add_volumes: the volume uuids you want to add to the CG
        :param remove_volumes: the volume uuids you want to remove from
the CG
"""
LOG.info(_LI("Update Consistency Group: %(group)s. "
"This adds and/or removes volumes from a CG."),
{'group': group['id']})
modelUpdate = {'status': fields.ConsistencyGroupStatus.AVAILABLE}
volumeTypeId = group['volume_type_id'].replace(",", "")
cg_name = self.utils.truncate_string(group['id'], 8)
extraSpecs = self._initial_setup(None, volumeTypeId)
_poolInstanceName, storageSystem = (
self._get_pool_and_storage_system(extraSpecs))
add_vols = [vol for vol in add_volumes] if add_volumes else []
add_instance_names = self._get_volume_instance_names(add_vols)
remove_vols = [vol for vol in remove_volumes] if remove_volumes else []
remove_instance_names = self._get_volume_instance_names(remove_vols)
self.conn = self._get_ecom_connection()
try:
replicationService = self.utils.find_replication_service(
self.conn, storageSystem)
cgInstanceName = (
self._find_consistency_group(replicationService, cg_name))
if cgInstanceName is None:
raise exception.ConsistencyGroupNotFound(
consistencygroup_id=cg_name)
# Add volume(s) to a consistency group
if add_instance_names:
self.provision.add_volume_to_cg(
self.conn, replicationService, cgInstanceName,
add_instance_names, cg_name, None,
extraSpecs)
# Remove volume(s) from a consistency group
if remove_instance_names:
self.provision.remove_volume_from_cg(
self.conn, replicationService, cgInstanceName,
remove_instance_names, cg_name, None,
extraSpecs)
except exception.ConsistencyGroupNotFound:
raise
except Exception as ex:
LOG.error(_LE("Exception: %(ex)s"), {'ex': ex})
exceptionMessage = (_("Failed to update consistency group:"
" %(cgName)s.")
% {'cgName': cg_name})
LOG.error(exceptionMessage)
raise exception.VolumeBackendAPIException(data=exceptionMessage)
return modelUpdate, None, None
def _get_volume_instance_names(self, volumes):
"""Get volume instance names from volume.
:param volumes: volume objects
:returns: volume instance names
"""
volumeInstanceNames = []
for volume in volumes:
volumeInstance = self._find_lun(volume)
if volumeInstance is None:
LOG.error(_LE("Volume %(name)s not found on the array."),
{'name': volume['name']})
else:
volumeInstanceNames.append(volumeInstance.path)
return volumeInstanceNames
def create_consistencygroup_from_src(self, context, group, volumes,
cgsnapshot, snapshots, source_cg,
source_vols):
"""Creates the consistency group from source.
Currently the source can only be a cgsnapshot.
:param context: the context
:param group: the consistency group object to be created
:param volumes: volumes in the consistency group
:param cgsnapshot: the source consistency group snapshot
:param snapshots: snapshots of the source volumes
:param source_cg: the source consistency group
        :param source_vols: the source volumes
:returns: model_update, volumes_model_update
model_update is a dictionary of cg status
volumes_model_update is a list of dictionaries of volume
update
"""
LOG.debug("Enter EMCVMAXCommon::create_consistencygroup_from_src. "
"Group to be created: %(cgId)s, "
"Source snapshot: %(cgSnapshot)s.",
{'cgId': group['id'],
'cgSnapshot': cgsnapshot['consistencygroup_id']})
volumeTypeId = group['volume_type_id'].replace(",", "")
extraSpecs = self._initial_setup(None, volumeTypeId)
self.create_consistencygroup(context, group)
targetCgName = self.utils.truncate_string(group['id'], TRUNCATE_8)
if not snapshots:
exceptionMessage = (_("No source snapshots provided to create "
"consistency group %s.") % targetCgName)
raise exception.VolumeBackendAPIException(
data=exceptionMessage)
modelUpdate = {'status': fields.ConsistencyGroupStatus.AVAILABLE}
_poolInstanceName, storageSystem = (
self._get_pool_and_storage_system(extraSpecs))
try:
replicationService = self.utils.find_replication_service(
self.conn, storageSystem)
if replicationService is None:
exceptionMessage = (_(
"Cannot find replication service on system %s.") %
storageSystem)
raise exception.VolumeBackendAPIException(
data=exceptionMessage)
targetCgInstanceName = self._find_consistency_group(
replicationService, targetCgName)
LOG.debug("Create CG %(targetCg)s from snapshot.",
{'targetCg': targetCgInstanceName})
for volume, snapshot in zip(volumes, snapshots):
volumeSizeInbits = int(self.utils.convert_gb_to_bits(
snapshot['volume_size']))
targetVolumeName = 'targetVol'
volume = {'size': int(self.utils.convert_bits_to_gbs(
volumeSizeInbits))}
if extraSpecs[ISV3]:
_rc, volumeDict, _storageSystemName = (
self._create_v3_volume(
volume, targetVolumeName, volumeSizeInbits,
extraSpecs))
else:
_rc, volumeDict, _storageSystemName = (
self._create_composite_volume(
volume, targetVolumeName, volumeSizeInbits,
extraSpecs))
targetVolumeInstance = self.utils.find_volume_instance(
self.conn, volumeDict, targetVolumeName)
LOG.debug("Create target volume for member snapshot. "
"Source snapshot: %(snapshot)s, "
"Target volume: %(targetVol)s.",
{'snapshot': snapshot['id'],
'targetVol': targetVolumeInstance.path})
self.provision.add_volume_to_cg(self.conn,
replicationService,
targetCgInstanceName,
targetVolumeInstance.path,
targetCgName,
targetVolumeName,
extraSpecs)
sourceCgName = self.utils.truncate_string(cgsnapshot['id'],
TRUNCATE_8)
sourceCgInstanceName = self._find_consistency_group(
replicationService, sourceCgName)
if sourceCgInstanceName is None:
exceptionMessage = (_("Cannot find source CG instance. "
"consistencygroup_id: %s.") %
cgsnapshot['consistencygroup_id'])
raise exception.VolumeBackendAPIException(
data=exceptionMessage)
relationName = self.utils.truncate_string(group['id'], TRUNCATE_5)
if extraSpecs[ISV3]:
self.provisionv3.create_group_replica(
self.conn, replicationService, sourceCgInstanceName,
targetCgInstanceName, relationName, extraSpecs)
else:
self.provision.create_group_replica(
self.conn, replicationService, sourceCgInstanceName,
targetCgInstanceName, relationName, extraSpecs)
# Break the replica group relationship.
rgSyncInstanceName = self.utils.find_group_sync_rg_by_target(
self.conn, storageSystem, targetCgInstanceName, extraSpecs,
True)
if rgSyncInstanceName is not None:
if extraSpecs[ISV3]:
# Operation 9: dissolve for snapVx
operation = self.utils.get_num(9, '16')
self.provisionv3.break_replication_relationship(
self.conn, replicationService, rgSyncInstanceName,
operation, extraSpecs)
else:
self.provision.delete_clone_relationship(
self.conn, replicationService,
rgSyncInstanceName, extraSpecs)
except Exception:
cgSnapshotId = cgsnapshot['consistencygroup_id']
exceptionMessage = (_("Failed to create CG %(cgName)s "
"from snapshot %(cgSnapshot)s.")
% {'cgName': targetCgName,
'cgSnapshot': cgSnapshotId})
LOG.exception(exceptionMessage)
raise exception.VolumeBackendAPIException(data=exceptionMessage)
volumes_model_update = self.utils.get_volume_model_updates(
context, volumes, group['id'], modelUpdate['status'])
return modelUpdate, volumes_model_update
def _find_ip_protocol_endpoints(self, conn, storageSystemName,
portgroupname):
"""Find the IP protocol endpoint for ISCSI.
:param storageSystemName: the system name
:param portgroupname: the portgroup name
:returns: foundIpAddresses
"""
LOG.debug("The portgroup name for iscsiadm is %(pg)s",
{'pg': portgroupname})
foundipaddresses = []
configservice = (
self.utils.find_controller_configuration_service(
conn, storageSystemName))
portgroupinstancename = (
self.masking.find_port_group(conn, configservice, portgroupname))
iscsiendpointinstancenames = (
self.utils.get_iscsi_protocol_endpoints(
conn, portgroupinstancename))
for iscsiendpointinstancename in iscsiendpointinstancenames:
tcpendpointinstancenames = (
self.utils.get_tcp_protocol_endpoints(
conn, iscsiendpointinstancename))
for tcpendpointinstancename in tcpendpointinstancenames:
ipendpointinstancenames = (
self.utils.get_ip_protocol_endpoints(
conn, tcpendpointinstancename))
endpoint = {}
for ipendpointinstancename in ipendpointinstancenames:
endpoint = self.get_ip_and_iqn(conn, endpoint,
ipendpointinstancename)
if bool(endpoint):
foundipaddresses.append(endpoint)
return foundipaddresses
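    # Illustrative sketch only: each entry in the returned list is an endpoint
    # dict built by get_ip_and_iqn below. The addresses and IQNs here are
    # made-up examples.
    #
    #   foundipaddresses = [
    #       {'ip': '10.0.0.51', 'iqn': 'iqn.1992-04.com.emc:500009735'},
    #       {'ip': '10.0.0.52', 'iqn': 'iqn.1992-04.com.emc:500009736'},
    #   ]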
def _extend_v3_volume(self, volumeInstance, volumeName, newSize,
extraSpecs):
"""Extends a VMAX3 volume.
:param volumeInstance: volume instance
:param volumeName: volume name
:param newSize: new size the volume will be increased to
:param extraSpecs: extra specifications
:returns: int -- return code
:returns: volumeDict
"""
new_size_in_bits = int(self.utils.convert_gb_to_bits(newSize))
storageConfigService = self.utils.find_storage_configuration_service(
self.conn, volumeInstance['SystemName'])
volumeDict, rc = self.provisionv3.extend_volume_in_SG(
self.conn, storageConfigService, volumeInstance.path,
volumeName, new_size_in_bits, extraSpecs)
return rc, volumeDict
def _create_duplicate_volume(
self, sourceInstance, cloneName, extraSpecs):
"""Create a volume in the same dimensions of the source volume.
:param sourceInstance: the source volume instance
:param cloneName: the user supplied snap name
:param extraSpecs: additional info
:returns: targetInstance
"""
numOfBlocks = sourceInstance['NumberOfBlocks']
blockSize = sourceInstance['BlockSize']
volumeSizeInbits = numOfBlocks * blockSize
volume = {'size':
int(self.utils.convert_bits_to_gbs(volumeSizeInbits))}
_rc, volumeDict, _storageSystemName = (
self._create_v3_volume(
volume, cloneName, volumeSizeInbits, extraSpecs))
targetInstance = self.utils.find_volume_instance(
self.conn, volumeDict, cloneName)
LOG.debug("Create replica target volume "
"Source Volume: %(sourceVol)s, "
"Target Volume: %(targetVol)s.",
{'sourceVol': sourceInstance.path,
'targetVol': targetInstance.path})
return targetInstance
def get_ip_and_iqn(self, conn, endpoint, ipendpointinstancename):
"""Get ip and iqn from the endpoint.
:param conn: ecom connection
:param endpoint: end point
:param ipendpointinstancename: ip endpoint
:returns: endpoint
"""
if ('iSCSIProtocolEndpoint' in six.text_type(
ipendpointinstancename['CreationClassName'])):
iqn = self.utils.get_iqn(conn, ipendpointinstancename)
if iqn:
endpoint['iqn'] = iqn
elif ('IPProtocolEndpoint' in six.text_type(
ipendpointinstancename['CreationClassName'])):
ipaddress = (
self.utils.get_iscsi_ip_address(
conn, ipendpointinstancename))
if ipaddress:
endpoint['ip'] = ipaddress
return endpoint
| {
"content_hash": "89f07aa8b5319dcf739a05c1bd0214c7",
"timestamp": "",
"source": "github",
"line_count": 4589,
"max_line_length": 79,
"avg_line_length": 44.219001961211596,
"alnum_prop": 0.5764410780550067,
"repo_name": "cloudbase/cinder",
"id": "df509ccab29c92b22453d626b866b6488992ab38",
"size": "203564",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "cinder/volume/drivers/emc/emc_vmax_common.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "17586629"
},
{
"name": "Shell",
"bytes": "8187"
}
],
"symlink_target": ""
} |
import builtins
import functools
import importlib
import inspect
import os
import pprint
from subprocess import call
from docutils import nodes
from docutils.parsers.rst import Directive
from docutils.statemachine import ViewList
from jinja2 import Template
from sphinx.util.nodes import nested_parse_with_titles
from libqtile import command_object, configurable, widget
qtile_module_template = Template('''
.. qtile_class:: {{ module }}.{{ class_name }}
{% if no_config %}:no-config:{% endif %}
{% if no_commands %}:no-commands:{% endif %}
''')
qtile_class_template = Template('''
{{ class_name }}
{{ class_underline }}
.. autoclass:: {{ module }}.{{ class_name }}{% for arg in extra_arguments %}
{{ arg }}{% endfor %}
{% if is_widget %}
.. compound::
Supported bar orientations: {{ obj.orientations }}
{% endif %}
{% if configurable %}
.. list-table::
:widths: 20 20 60
:header-rows: 1
* - key
- default
- description
{% for key, default, description in defaults %}
* - ``{{ key }}``
- ``{{ default }}``
- {{ description }}
{% endfor %}
{% endif %}
{% if commandable %}
{% for cmd in commands %}
.. automethod:: {{ module }}.{{ class_name }}.{{ cmd }}
{% endfor %}
{% endif %}
''')
qtile_hooks_template = Template('''
.. automethod:: libqtile.hook.subscribe.{{ method }}
''')
# Adapted from sphinxcontrib-httpdomain
def import_object(module_name, expr):
mod = __import__(module_name)
mod = functools.reduce(getattr, module_name.split('.')[1:], mod)
globals = builtins
if not isinstance(globals, dict):
globals = globals.__dict__
return eval(expr, globals, mod.__dict__)
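# Illustrative sketch only: the dotted path is split into a module and an
# expression, the module is imported, and the expression is evaluated in its
# namespace. Example lookups (assuming libqtile is importable):
#
#   clock_cls = import_object('libqtile.widget', 'Clock')    # the Clock widget class
#   subscribe = import_object('libqtile.hook', 'subscribe')  # the hook registry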
class SimpleDirectiveMixin:
has_content = True
required_arguments = 1
def make_rst(self):
raise NotImplementedError
def run(self):
node = nodes.section()
node.document = self.state.document
result = ViewList()
for line in self.make_rst():
result.append(line, '<{0}>'.format(self.__class__.__name__))
nested_parse_with_titles(self.state, result, node)
return node.children
def sphinx_escape(s):
return pprint.pformat(s, compact=False, width=10000)
class QtileClass(SimpleDirectiveMixin, Directive):
optional_arguments = 2
def make_rst(self):
module, class_name = self.arguments[0].rsplit('.', 1)
arguments = self.arguments[1:]
obj = import_object(module, class_name)
is_configurable = ':no-config:' not in arguments
is_commandable = ':no-commands:' not in arguments
arguments = [i for i in arguments if i not in (':no-config:', ':no-commands:')]
# build up a dict of defaults using reverse MRO
defaults = {}
for klass in reversed(obj.mro()):
if not issubclass(klass, configurable.Configurable):
continue
if not hasattr(klass, "defaults"):
continue
klass_defaults = getattr(klass, "defaults")
defaults.update({
d[0]: d[1:] for d in klass_defaults
})
# turn the dict into a list of ("value", "default", "description") tuples
defaults = [
(k, sphinx_escape(v[0]), sphinx_escape(v[1])) for k, v in sorted(defaults.items())
]
context = {
'module': module,
'class_name': class_name,
'class_underline': "=" * len(class_name),
'obj': obj,
'defaults': defaults,
'configurable': is_configurable and issubclass(obj, configurable.Configurable),
'commandable': is_commandable and issubclass(obj, command_object.CommandObject),
'is_widget': issubclass(obj, widget.base._Widget),
'extra_arguments': arguments,
}
if context['commandable']:
context['commands'] = [
attr for attr in dir(obj) if attr.startswith('cmd_')
]
rst = qtile_class_template.render(**context)
for line in rst.splitlines():
yield line
class QtileHooks(SimpleDirectiveMixin, Directive):
def make_rst(self):
module, class_name = self.arguments[0].rsplit('.', 1)
obj = import_object(module, class_name)
for method in sorted(obj.hooks):
rst = qtile_hooks_template.render(method=method)
for line in rst.splitlines():
yield line
class QtileModule(SimpleDirectiveMixin, Directive):
# :baseclass: <base class path>
# :no-commands:
# :no-config:
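    #
    # Illustrative reStructuredText usage (the module and base class paths
    # here are examples, not a prescription):
    #
    #   .. qtile_module:: libqtile.widget
    #      :baseclass: libqtile.widget.base._Widget
    #      :no-commands: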
optional_arguments = 4
def make_rst(self):
module = importlib.import_module(self.arguments[0])
BaseClass = None
if ':baseclass:' in self.arguments:
BaseClass = import_object(*self.arguments[
self.arguments.index(':baseclass:') + 1].rsplit('.', 1))
for item in dir(module):
obj = import_object(self.arguments[0], item)
            # Skip anything that is not a class, or that is not derived from
            # the requested base class.
            if not inspect.isclass(obj) or (BaseClass and
                                            not issubclass(obj, BaseClass)):
continue
context = {
'module': self.arguments[0],
'class_name': item,
'no_config': ':no-config:' in self.arguments,
'no_commands': ':no-commands:' in self.arguments,
}
rst = qtile_module_template.render(**context)
for line in rst.splitlines():
if not line.strip():
continue
yield line
def generate_keybinding_images():
this_dir = os.path.dirname(__file__)
base_dir = os.path.abspath(os.path.join(this_dir, ".."))
call(['make', '-C', base_dir, 'run-ffibuild'])
call(['make', '-C', this_dir, 'genkeyimg'])
def setup(app):
generate_keybinding_images()
app.add_directive('qtile_class', QtileClass)
app.add_directive('qtile_hooks', QtileHooks)
app.add_directive('qtile_module', QtileModule)
| {
"content_hash": "13890b29c48c7e688f9080c7cfca98bd",
"timestamp": "",
"source": "github",
"line_count": 193,
"max_line_length": 94,
"avg_line_length": 31.46113989637306,
"alnum_prop": 0.5802042160737813,
"repo_name": "tych0/qtile",
"id": "6c7cd359ccf349908338ae320ae62f876a84101b",
"size": "7160",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "docs/sphinx_qtile.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "1012"
},
{
"name": "Python",
"bytes": "1299146"
},
{
"name": "Roff",
"bytes": "3605"
},
{
"name": "Shell",
"bytes": "8166"
}
],
"symlink_target": ""
} |
import tempfile
import schema as s
import caliban.util.schema as us
import pytest
def test_directory(tmpdir):
# Proper directories pass validation.
assert us.Directory.validate(tmpdir) == tmpdir
  # Random dirs that I made up don't!
with pytest.raises(s.SchemaError):
assert us.Directory.validate('random')
def test_file():
with tempfile.NamedTemporaryFile() as tmp:
# Existing files pass validation.
assert us.File.validate(tmp.name) == tmp.name
  # Random paths that I made up don't!
with pytest.raises(s.SchemaError):
assert us.File.validate('random')
| {
"content_hash": "008ed07847f8fc143c2b603ec2555198",
"timestamp": "",
"source": "github",
"line_count": 25,
"max_line_length": 49,
"avg_line_length": 23.48,
"alnum_prop": 0.7308347529812607,
"repo_name": "google/caliban",
"id": "af9bb5642108d600dd67621ce257a828608760b9",
"size": "1183",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/caliban/util/test_schema.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "3885"
},
{
"name": "Makefile",
"bytes": "1478"
},
{
"name": "Python",
"bytes": "547359"
},
{
"name": "Shell",
"bytes": "3071"
},
{
"name": "TeX",
"bytes": "3557"
}
],
"symlink_target": ""
} |
import argparse
import os
import tensorflow as tf
import tfmodel
import csv
parser = argparse.ArgumentParser()
parser.add_argument("--train_csv", type=str)
parser.add_argument("--test_csv", type=str)
parser.add_argument("--output_path", type=str, default="outputs")
parser.add_argument("--learning_rate", type=float, default=0.01)
parser.add_argument("--batch_size", type=int, default=2)
parser.add_argument("--n_classes", type=int, default=2)
parser.add_argument("--n_epochs", type=int, default=1)
args, unknown_args = parser.parse_known_args()
N_CLASSES = args.n_classes
BATCH_SIZE = args.batch_size
TRAIN_CSV = args.train_csv
TEST_CSV = args.test_csv
LEARNING_RATE = args.learning_rate
OUTPUT_PATH = args.output_path
N_EPOCHS = args.n_epochs
CHECKPOINT_DIR = os.path.join(OUTPUT_PATH, "checkpoints")
def build_queue(csv_file, num_epochs=None):
with tf.name_scope("queue"):
filename_queue = tf.train.string_input_producer([csv_file], num_epochs=num_epochs)
reader = tf.TextLineReader(skip_header_lines=1)
key, value = reader.read(filename_queue)
img_file_path, label = tf.decode_csv(value, record_defaults=[[""], [1]])
image = tf.image.decode_jpeg(tf.read_file(img_file_path), channels=3)
assert image.get_shape().as_list() == [None, None, 3]
image = tf.image.resize_images(image, [224, 224], method=tf.image.ResizeMethod.NEAREST_NEIGHBOR)
assert image.get_shape().as_list() == [224, 224, 3]
# image.set_shape([224, 224, 3])
image = tf.cast(image, tf.float32)
# label = tf.one_hot(label, depth=N_CLASSES)
image_batch, label_batch = tf.train.shuffle_batch(
[image, label],
batch_size=BATCH_SIZE,
num_threads=64,
capacity=512,
min_after_dequeue=0
)
return image_batch, label_batch
def get_input_fn(csv_file, n_class, n_epoch):
def input_fn():
image_batch, label_batch = build_queue(csv_file=csv_file, num_epochs=n_epoch)
return {"images": image_batch}, tf.one_hot(label_batch, depth=n_class)
return input_fn
def generate_csv(filenames, output, labels):
image_file_paths = []
image_labels = []
for i, f in enumerate(filenames):
files = tf.gfile.Glob(filename=f)
l = [labels[i]] * len(files)
image_file_paths.extend(files)
image_labels.extend(l)
result = zip(image_file_paths, image_labels)
with tf.gfile.Open(output, mode="w") as csvfile:
writer = csv.writer(csvfile)
writer.writerows(result)
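# Illustrative sketch only (file patterns and labels are made-up examples):
# generate_csv globs each pattern, pairs every matched file with the label at
# the same index, and writes "<image_path>,<label>" rows -- the format that
# build_queue's tf.decode_csv call above expects, e.g.
#
#   generate_csv(filenames=["img/cats/*.jpg", "img/dogs/*.jpg"],
#                output="img/train.csv", labels=[0, 1])
#   # img/cats/001.jpg,0
#   # img/dogs/042.jpg,1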
if __name__ == "__main__":
tf.logging.set_verbosity(tf.logging.DEBUG)
run_config = tf.estimator.RunConfig().replace(
save_summary_steps=1,
)
clf = tfmodel.estimator.VGG16Classifier(
fc_units=[128],
n_classes=2,
model_dir="model",
config=run_config
)
input_fn = get_input_fn(csv_file="img/train.csv", n_epoch=5, n_class=2)
clf.train(input_fn=input_fn)
| {
"content_hash": "7dbe73a307ed0d804ab27fae7954b69e",
"timestamp": "",
"source": "github",
"line_count": 86,
"max_line_length": 104,
"avg_line_length": 34.7906976744186,
"alnum_prop": 0.6450534759358288,
"repo_name": "sfujiwara/tfmodel",
"id": "a3abd809ae2ea427259adfa23f765e5a6f77b55d",
"size": "3017",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "examples/image-classification/trainer/task.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "29905"
}
],
"symlink_target": ""
} |
"""
Important classes of Spark SQL and DataFrames:
- :class:`pyspark.sql.SQLContext`
Main entry point for :class:`DataFrame` and SQL functionality.
- :class:`pyspark.sql.DataFrame`
A distributed collection of data grouped into named columns.
- :class:`pyspark.sql.Column`
A column expression in a :class:`DataFrame`.
- :class:`pyspark.sql.Row`
A row of data in a :class:`DataFrame`.
- :class:`pyspark.sql.HiveContext`
Main entry point for accessing data stored in Apache Hive.
- :class:`pyspark.sql.GroupedData`
Aggregation methods, returned by :func:`DataFrame.groupBy`.
- :class:`pyspark.sql.DataFrameNaFunctions`
Methods for handling missing data (null values).
- :class:`pyspark.sql.DataFrameStatFunctions`
Methods for statistics functionality.
- :class:`pyspark.sql.functions`
List of built-in functions available for :class:`DataFrame`.
- :class:`pyspark.sql.types`
List of data types available.
- :class:`pyspark.sql.Window`
For working with window functions.
"""
from __future__ import absolute_import
def since(version):
"""
A decorator that annotates a function to append the version of Spark the function was added.
"""
import re
indent_p = re.compile(r'\n( +)')
def deco(f):
indents = indent_p.findall(f.__doc__)
indent = ' ' * (min(len(m) for m in indents) if indents else 0)
f.__doc__ = f.__doc__.rstrip() + "\n\n%s.. versionadded:: %s" % (indent, version)
return f
return deco
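# Illustrative sketch only: applying the decorator appends a Sphinx
# ``.. versionadded::`` note to the wrapped function's docstring, e.g.
#
#   @since(1.4)
#   def freqItems(self, cols, support=None):
#       """Finding frequent items for columns."""
#
#   # freqItems.__doc__ now ends with ".. versionadded:: 1.4"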
from pyspark.sql.types import Row
from pyspark.sql.context import SQLContext, HiveContext
from pyspark.sql.column import Column
from pyspark.sql.dataframe import DataFrame, SchemaRDD, DataFrameNaFunctions, DataFrameStatFunctions
from pyspark.sql.group import GroupedData
from pyspark.sql.readwriter import DataFrameReader, DataFrameWriter
from pyspark.sql.window import Window, WindowSpec
__all__ = [
'SQLContext', 'HiveContext', 'DataFrame', 'GroupedData', 'Column', 'Row',
'DataFrameNaFunctions', 'DataFrameStatFunctions', 'Window', 'WindowSpec',
'DataFrameReader', 'DataFrameWriter'
]
| {
"content_hash": "b797ccd866edab96e483122335ecc90d",
"timestamp": "",
"source": "github",
"line_count": 58,
"max_line_length": 100,
"avg_line_length": 37.41379310344828,
"alnum_prop": 0.6926267281105991,
"repo_name": "andrewor14/iolap",
"id": "ad9c891ba1c04963fc8d20ff167700bdb633c502",
"size": "2955",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "python/pyspark/sql/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "27246"
},
{
"name": "C",
"bytes": "1493"
},
{
"name": "CSS",
"bytes": "15194"
},
{
"name": "HTML",
"bytes": "2508"
},
{
"name": "Java",
"bytes": "1248286"
},
{
"name": "JavaScript",
"bytes": "64772"
},
{
"name": "Makefile",
"bytes": "7771"
},
{
"name": "Python",
"bytes": "1272669"
},
{
"name": "R",
"bytes": "360680"
},
{
"name": "Roff",
"bytes": "5379"
},
{
"name": "SQLPL",
"bytes": "3603"
},
{
"name": "Scala",
"bytes": "11975802"
},
{
"name": "Shell",
"bytes": "157990"
}
],
"symlink_target": ""
} |
import bisect
import functools
import imath
import six
import weakref
import IECore
import Gaffer
import GafferUI
from Qt import QtCore
from Qt import QtGui
from Qt import QtWidgets
from ._TableView import _TableView
## The MessageWidget class displays a list of messages using the IECore MessageHandler
# format. Two display roles are available depending on the nature/quantity of
# the messages to be shown. Optional toolbars allow message navigation, search
# and severity selection.
class MessageWidget( GafferUI.Widget ) :
# Messages : For presenting longer messages in detail. They are shown as line-wrapped paragraphs.
# Log : For presenting a large number of messages in tabular form with un-wrapped lines.
Role = IECore.Enum.create( "Messages", "Log" )
# messageLevel : The minimum importance of message that will be displayed.
# role : The style of message presentation.
# toolbars : When true, search/navigation toolbars will be displayed with the widget.
# follow : When enabled, the widget will auto-scroll to the latest message unless the
# user has set a custom scroll position (scrolling to the end will re-enable).
def __init__( self, messageLevel = IECore.MessageHandler.Level.Info, role = Role.Messages, toolbars = False, follow = False, **kw ) :
rows = GafferUI.ListContainer( GafferUI.ListContainer.Orientation.Vertical, spacing=4 )
GafferUI.Widget.__init__( self, rows, **kw )
upperToolbar = None
with rows :
if toolbars :
upperToolbar = GafferUI.ListContainer( GafferUI.ListContainer.Orientation.Horizontal, spacing = 4 )
with upperToolbar :
GafferUI.Label( "Show" )
self.__levelWidget = _MessageLevelWidget()
self.__levelWidget.messageLevelChangedSignal().connect( Gaffer.WeakMethod( self.__messageLevelChanged ), scoped = False )
GafferUI.Spacer( imath.V2i( 6 ), preferredSize = imath.V2i( 100, 0 ) )
self.__table = _MessageTableView( follow = follow, expandRows = role is MessageWidget.Role.Messages )
if toolbars :
with GafferUI.ListContainer( GafferUI.ListContainer.Orientation.Horizontal ) :
shortcuts = self.__table.eventNavigationShortcuts()
toolTips = { l : "Click to jump to next {} message [{}]".format( l, shortcuts[l] ) for l in _messageLevels }
self.__summaryWidget = MessageSummaryWidget( displayLevel = IECore.MessageHandler.Level.Debug, hideUnusedLevels = False, buttonToolTip = toolTips )
self.__summaryWidget.levelButtonClickedSignal().connect( Gaffer.WeakMethod( self.__levelButtonClicked ), scoped = False )
GafferUI.Spacer( imath.V2i( 0 ) )
self.__toEndButton = GafferUI.Button( image = "scrollToBottom.png", hasFrame = False )
self.__toEndButton.setToolTip( "Scroll to bottom and follow new messages [B]" )
self.__toEndButton.buttonPressSignal().connect( Gaffer.WeakMethod( self.__table.scrollToLatest ), scoped = False )
GafferUI.Spacer( imath.V2i( 3 ), imath.V2i( 3 ) )
if toolbars :
upperToolbar.addChild( self.__table.searchWidget() )
self.__table.messageLevelChangedSignal().connect( Gaffer.WeakMethod( self.__messageLevelChanged ), scoped = False )
self.__table.messagesChangedSignal().connect( Gaffer.WeakMethod( self.__messagesChanged ), scoped = False )
if follow :
# When following, we manage the enabled state of the toEndButton based on the auto-scroll
# state of the table view. If we're not, then it should remain active the whole time.
self.__isFollowingMessagesChanged( self.__table )
self.__table.isFollowingMessagesChangedSignal().connect( Gaffer.WeakMethod( self.__isFollowingMessagesChanged ), scoped = False )
self.__messageHandler = _MessageHandler( self )
self.setMessageLevel( messageLevel )
self.setMessages( Gaffer.Private.IECorePreview.Messages() )
## Displays the specified messages. To add individual messages, submit them
# via the widget's message handler \see messageHandler().
def setMessages( self, messages ) :
self.__table.setMessages( messages )
## Returns (a copy of) the messages displayed by the widget.
def getMessages( self ) :
return Gaffer.Private.IECorePreview.Messages( self.__table.getMessages() )
## Clears all the displayed messages.
def clear( self ) :
self.__table.clear()
## Returns a MessageHandler which will output to this Widget.
## \threading It is safe to use the handler on threads other than the main thread.
def messageHandler( self ) :
return self.__messageHandler
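	# Illustrative usage sketch (the level, context and message text here are
	# examples only) :
	#
	#   widget = MessageWidget( toolbars = True )
	#   widget.messageHandler().handle(
	#       IECore.MessageHandler.Level.Warning, "Render", "Missing texture"
	#   )
	#   widget.messageCount( IECore.MessageHandler.Level.Warning )  # -> 1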
## It can be useful to forward messages captured by this widget
# on to other message handlers - for instance to perform centralised
# logging in addition to local display. This method returns a
# CompoundMessageHandler which can be used for such forwarding -
# simply add a handler with forwardingMessageHandler().addHandler().
def forwardingMessageHandler( self ) :
return self.__messageHandler._forwarder
## Sets an IECore.MessageHandler.Level specifying which
# type of messages will be visible to the user - levels above
# that specified will be invisible. Note that the invisible
# messages are still stored, so they can be made visible at a later
# time by a suitable call to setMessageLevel(). This can be useful
# for revealing debug messages only after a warning or error has
# alerted the user to a problem.
def setMessageLevel( self, messageLevel ) :
assert( isinstance( messageLevel, IECore.MessageHandler.Level ) )
self.__table.setMessageLevel( messageLevel )
## Returns the current IECore.MessageHandler.Level at and below which
# messages will be shown in the widget.
def getMessageLevel( self ) :
return self.__table.getMessageLevel()
## Returns the number of messages being displayed, optionally
# restricted to the specified level.
def messageCount( self, level = None ) :
messages = self.__table.getMessages()
if level is None :
return len( messages )
else :
return messages.count( level )
## Navigates the view to the next message of the specified level,
# considering the current selection.
def scrollToNextMessage( self, messageLevel, select = True, wrap = True ) :
self.__table.scrollToNextMessage( messageLevel, select, wrap )
## Navigates the view to the previous message of the specified level,
# considering the current selection.
def scrollToPreviousMessage( self, messageLevel, select = True, wrap = True ) :
self.__table.scrollToPreviousMessage( messageLevel, select, wrap )
# Friendship for our internal message handler
def _addMessage( self, level, context, message ) :
self.__table.addMessage( Gaffer.Private.IECorePreview.Message( level, context, message ) )
# Signal callbacks - only called when toolbars are present
def __messageLevelChanged( self, widget ) :
messageLevel = widget.getMessageLevel()
self.__table.setMessageLevel( messageLevel )
self.__levelWidget.setMessageLevel( messageLevel )
def __levelButtonClicked( self, level ) :
if GafferUI.Widget.currentModifiers() == GafferUI.ModifiableEvent.Modifiers.Shift :
self.__table.scrollToPreviousMessage( level )
else :
self.__table.scrollToNextMessage( level )
def __messagesChanged( self, widget ) :
self.__summaryWidget.setMessages( widget.getMessages() )
def __isFollowingMessagesChanged( self, widget ) :
self.__toEndButton.setEnabled( not widget.isFollowingMessages() )
# ================
# Internal Classes
# ================
# A message handler that adds messages directly to the widgets messages container.
class _MessageHandler( IECore.MessageHandler ) :
def __init__( self, messageWidget ) :
IECore.MessageHandler.__init__( self )
self._forwarder = IECore.CompoundMessageHandler()
self.__processingEvents = False
# using a weak reference because we're owned by the MessageWidget,
# so we mustn't have a reference back.
self.__messageWidget = weakref.ref( messageWidget )
def handle( self, level, context, msg ) :
self._forwarder.handle( level, context, msg )
w = self.__messageWidget()
if w :
application = QtWidgets.QApplication.instance()
if QtCore.QThread.currentThread() == application.thread() :
w._addMessage( level, context, msg )
# Code like GafferCortexUI.OpDialogue has the option to run the op on the
# main thread. We want to update the ui as they occur, so we force the
# event loop to clear here. As processEvents may result in re-entry to this
# function (the called code may desire to log another message through this
				# handler), we must guard against recursion so we don't run out of stack.
if not self.__processingEvents :
try :
self.__processingEvents = True
# Calling processEvents can cause almost anything to be executed,
# including idle callbacks that might build UIs. We must push an
# empty parent so that any widgets created will not be inadvertently
# parented to the wrong thing.
## \todo Calling `processEvents()` has also caused problems in the
# past where a simple error message has then led to idle callbacks
# being triggered which in turn triggered a graph evaluation. Having
# a message handler lead to arbitarary code execution is not good! Is
# there some way we can update the UI without triggering arbitrary
# code evaluation?
w._pushParent( None )
application.processEvents( QtCore.QEventLoop.ExcludeUserInputEvents )
w._popParent()
finally :
self.__processingEvents = False
else :
GafferUI.EventLoop.executeOnUIThread( functools.partial( w._addMessage, level, context, msg ) )
else :
# the widget has died. bad things are probably afoot so its best
# that we output the messages somewhere to aid in debugging.
IECore.MessageHandler.getDefaultHandler().handle( level, context, msg )
# =================
# Component Widgets
# =================
_messageLevels = (
IECore.MessageHandler.Level.Error, IECore.MessageHandler.Level.Warning,
IECore.MessageHandler.Level.Info, IECore.MessageHandler.Level.Debug
)
# Provides badge + count for each message level. The badges are clickable,
# \see levelButtonClickedSignal.
class MessageSummaryWidget( GafferUI.Widget ) :
# displayLevel : Only display counts or messages of this level or lower
# hideUnusedLevels : When true, counts will be hidden for unused message levels
# buttonToolTip : The tooltip to display on the count buttons. This can be a string, applied to all buttons
# or a dict, keyed by message level.
def __init__( self, displayLevel = IECore.MessageHandler.Level.Warning, hideUnusedLevels = True, buttonToolTip = None, **kw ) :
row = GafferUI.ListContainer( GafferUI.ListContainer.Orientation.Horizontal, spacing = 4 )
GafferUI.Widget.__init__( self, row, **kw )
self.__hideUnusedLevels = hideUnusedLevels
# Keep in a local too to allow us to capture the signal in a lambda without dragging in self
buttonSignal = Gaffer.Signal1()
self.__levelButtonClickedSignal = buttonSignal
self.__buttons = {}
with row :
for level in _messageLevels :
if int( level ) > int( displayLevel ) :
break
button = GafferUI.Button( image = str(level).lower() + "Small.png", hasFrame = False )
button.clickedSignal().connect( functools.partial( lambda l, _ : buttonSignal( l ), level ), scoped = False )
if isinstance( buttonToolTip, dict ) :
button.setToolTip( buttonToolTip[ level ] )
elif isinstance( buttonToolTip, six.string_types ) :
button.setToolTip( buttonToolTip )
self.__buttons[ level ] = button
self.setMessages( Gaffer.Private.IECorePreview.Messages() )
# Emitted with the level of the button that was pressed
def levelButtonClickedSignal( self ) :
return self.__levelButtonClickedSignal
# Updates the button status and message count to that of the supplied messages
def setMessages( self, messages ) :
self.__messages = messages
for level, button in self.__buttons.items() :
count = messages.count( level )
button.setEnabled( count > 0 )
button.setText( "%d" % count )
if self.__hideUnusedLevels :
button.setVisible( count > 0 )
def getMessages( self ) :
return Gaffer.Private.IECorePreview.Messages( self.__messages )
## Provides a drop down menu to select an IECore.MessageHandler.Level
class _MessageLevelWidget( GafferUI.Widget ) :
def __init__( self, messageLevel = IECore.MessageHandler.Level.Info, **kw ) :
self.__menuButton = GafferUI.MenuButton( menu = GafferUI.Menu( Gaffer.WeakMethod( self.__menuDefinition ) ) )
GafferUI.Widget.__init__( self, self.__menuButton, **kw )
self.__menuButton._qtWidget().setFixedWidth( 75 )
self.__level = None
self.__messageLevelChangedSignal = GafferUI.WidgetSignal()
self.setMessageLevel( messageLevel )
def setMessageLevel( self, level ) :
assert( isinstance( level, IECore.MessageHandler.Level ) )
if level == self.__level :
return
self.__menuButton.setText( str(level) )
self.__level = level
self.__messageLevelChangedSignal( self )
def getMessageLevel( self ) :
return self.__level
def messageLevelChangedSignal( self ) :
return self.__messageLevelChangedSignal
def __setMessageLevel( self, level, unused ) :
self.setMessageLevel( level )
def __menuDefinition( self ) :
menuDefinition = IECore.MenuDefinition()
for level in _messageLevels :
menuDefinition.append(
"/%s" % level,
{
"command" : functools.partial( Gaffer.WeakMethod( self.__setMessageLevel ), level ),
"checkBox" : self.__level == level
}
)
return menuDefinition
## Provides a search field along with result count display and navigation buttons for
# a _MessageTableView
class _MessageTableSearchWidget( GafferUI.Widget ) :
def __init__( self, tableView, **kw ) :
row = GafferUI.ListContainer( GafferUI.ListContainer.Orientation.Horizontal, spacing = 4 )
GafferUI.Widget.__init__( self, row, **kw )
with row :
self.__results = GafferUI.ListContainer( GafferUI.ListContainer.Orientation.Horizontal, spacing = 4 )
with self.__results :
self.__resultCount = GafferUI.Label()
self.__prevButton = GafferUI.Button( image = "arrowLeft10.png", hasFrame = False )
self.__prevButton.clickedSignal().connect( Gaffer.WeakMethod( self.__buttonClicked ), scoped = False )
self.__nextButton = GafferUI.Button( image = "arrowRight10.png", hasFrame = False )
self.__nextButton.clickedSignal().connect( Gaffer.WeakMethod( self.__buttonClicked ), scoped = False )
self.__focusButton = GafferUI.Button( image = "searchFocusOff.png", hasFrame = False )
self.__focusButton.clickedSignal().connect( Gaffer.WeakMethod( self.__buttonClicked ), scoped = False )
self.__searchField = GafferUI.TextWidget()
# Edited catches focus-out and makes sure we update the search text
self.__searchField.editingFinishedSignal().connect( Gaffer.WeakMethod( self.__textEdited ), scoped = False )
# Activated allows <enter> to repeatedly jump to the next search result
self.__searchField.activatedSignal().connect( Gaffer.WeakMethod( self.__textActivated ), scoped = False )
self.__searchField._qtWidget().setObjectName( "gafferSearchField" )
self.__searchField._qtWidget().setPlaceholderText( "Search" )
self.__searchField._qtWidget().setMaximumWidth( 250 )
self.__prevButton.setToolTip( "Show previous match [P]" )
self.__nextButton.setToolTip( "Show next match [N]" )
# Though Qt provides clearButtonEnabled(), this seems to be missing its icon on macOS, resulting in a
# clickable-but-not-visible clear button. As such we need to make our own. Icons need to be 16x16 exactly.
clearImage = GafferUI.Image( "clearSearch.png" )
self.__clearAction = QtWidgets.QAction( clearImage._qtIcon(), "Clear Search", None )
self.__clearAction.triggered.connect( Gaffer.WeakMethod( self.__clear ) )
self.__searchField._qtWidget().addAction( self.__clearAction, QtWidgets.QLineEdit.TrailingPosition )
self.__table = weakref.ref( tableView )
tableView.searchTextChangedSignal().connect( Gaffer.WeakMethod( self.__searchTextChanged ), scoped = False )
tableView.focusSearchResultsChangedSignal().connect( Gaffer.WeakMethod( self.__focusSearchResultsChanged ), scoped = False )
tableView.searchResultsChangedSignal().connect( Gaffer.WeakMethod( self.__searchResultsChanged ), scoped = False )
self.__searchTextChanged( tableView )
self.__focusSearchResultsChanged( tableView )
self.__searchResultsChanged( tableView )
def grabFocus( self ) :
self.__searchField.grabFocus()
self.__searchField.setSelection( None, None )
def __buttonClicked( self, button ) :
if button is self.__prevButton :
self.__table().scrollToPreviousSearchResult( self )
elif button is self.__nextButton :
self.__table().scrollToNextSearchResult( self )
elif button is self.__focusButton :
self.__table().setFocusSearchResults( not self.__table().getFocusSearchResults() )
def __textEdited( self, *unused ) :
self.__table().setSearchText( self.__searchField.getText() )
def __textActivated( self, *unused ) :
self.__table().setSearchText( self.__searchField.getText() )
if not self.__table().getFocusSearchResults() and self.__table().searchResultCount() > 0 :
self.__table().scrollToNextSearchResult()
def __clear( self ) :
self.__table().clearSearch()
self.grabFocus()
def __searchTextChanged( self, table ) :
text = table.getSearchText()
self.__searchField.setText( text )
self.__clearAction.setVisible( len(text) > 0 )
def __focusSearchResultsChanged( self, table ) :
isFocused = table.getFocusSearchResults()
haveResults = table.searchResultCount()
self.__focusButton.setImage( "searchFocusOn.png" if isFocused else "searchFocusOff.png" )
self.__focusButton.setToolTip( "Show all [S]" if isFocused else "Only show matches [S]" )
self.__prevButton.setEnabled( haveResults and not isFocused )
self.__nextButton.setEnabled( haveResults and not isFocused )
def __searchResultsChanged( self, table ) :
haveSearchString = len( table.getSearchText() ) > 0
self.__results.setVisible( haveSearchString )
isFocused = table.getFocusSearchResults()
count = table.searchResultCount()
self.__resultCount.setText( "{} match{}".format( count, "" if count == 1 else "es" ) )
self.__prevButton.setEnabled( count > 0 and not isFocused )
self.__nextButton.setEnabled( count > 0 and not isFocused )
## The main table view presenting messages to the user.
#
# The view manages three QAbstractItemModels :
#
# - __model : A model presenting the raw message data.
# - __filterModel : A proxy model filtering the message data for display based on the selected message level.
# - __searchModel : A side-car proxy model used to determine the result count for the free-text search
# independent of the current display.
#
# The table's view is driven from the __displayModel - which is set to either the __filterModel or __searchModel
# depending on getFocusSearchResults().
#
# The optional __displayTransform proxy facilitates 'expanded rows' display (\see MessageWidget.Role.Messages), to
# avoid duplication of search/message navigation coding.
#
class _MessageTableView( GafferUI.Widget ) :
SearchWidget = _MessageTableSearchWidget
# - follow : When set, the view will scroll to new messages as they are added (or to the end, when
# setMessages is used). If the user sets a custom scroll position, then following will be
# temporarily disabled. This state changed can be queried via isFollowingMessages.
# - expandRows : When set, a proxy model will be set in __displayTransform that unpacks message columns into
# separate rows.
def __init__( self, follow = False, expandRows = True, **kw ) :
tableView = _TableView()
GafferUI.Widget.__init__( self, tableView, **kw )
self.__setupSignals()
self.__setupModels( expandRows )
self.__setupAppearance( expandRows )
self.__didInitiateScroll = False
self.__userSetScrollPosition( False )
self.__setFollowMessages( follow )
self.keyPressSignal().connect( Gaffer.WeakMethod( self.__keyPress ), scoped = False )
self.setMessageLevel( IECore.MessageHandler.Level.Info )
self.__searchWidget = None
# Provides a search widget controlling this view that can be embedded in the containing UI.
def searchWidget( self ) :
if self.__searchWidget is None :
self.__searchWidget = _MessageTableView.SearchWidget( self )
return self.__searchWidget
def setMessageLevel( self, level ) :
if self.__filterModel.getLevel() == level :
return
self.__filterModel.setLevel( level )
self.__scrollIfNeeded()
self.__messageLevelChangedSignal( self )
def getMessageLevel( self ) :
return self.__filterModel.getLevel()
def messageLevelChangedSignal( self ) :
return self.__messageLevelChangedSignal
# Message management
def setMessages( self, messages ) :
self.__model.setMessages( messages )
self.__scrollIfNeeded()
self.__messagesChangedSignal( self )
def getMessages( self ) :
return self.__model.getMessages()
def addMessage( self, message ) :
self.__model.addMessage( message )
self.__scrollIfNeeded()
self.__messagesChangedSignal( self )
def clear( self ) :
self.__userSetScrollPosition( False )
self.setMessages( Gaffer.Private.IECorePreview.Messages() )
def messagesChangedSignal( self ) :
return self.__messagesChangedSignal
# Search
def setSearchText( self, searchText ) :
if searchText == self.__searchText :
return
self.__searchText = searchText
self.__searchModel.setFilterWildcard( searchText )
if not searchText :
self.setFocusSearchResults( False )
self.__searchTextChangedSignal( self )
def getSearchText( self ) :
return self.__searchText
def clearSearch( self, *unused ) :
self.setSearchText( "" )
def searchResultCount( self ) :
if not self.getSearchText() :
return 0
return self.__searchModel.rowCount()
def scrollToNextSearchResult( self, *unused ) :
self.__navigateSearchResult( previous = False )
def scrollToPreviousSearchResult( self, *unused ) :
self.__navigateSearchResult( previous = True )
def setFocusSearchResults( self, focus ) :
if self.getFocusSearchResults() == focus :
return
self.__setDisplayModel( self.__searchModel if focus else self.__filterModel )
self.__focusSearchResultsChangedSignal( self )
def getFocusSearchResults( self ) :
return self.__displayModel == self.__searchModel
def focusSearchResultsChangedSignal( self ) :
return self.__focusSearchResultsChangedSignal
def searchTextChangedSignal( self ) :
return self.__searchTextChangedSignal
def searchResultsChangedSignal( self ) :
return self.__searchResultsChangedSignal
# Message navigation
def scrollToNextMessage( self, messageLevel, select = True, wrap = True ) :
assert( isinstance( messageLevel, IECore.MessageHandler.Level ) )
self.setFocusSearchResults( False )
if messageLevel > self.getMessageLevel() :
self.setMessageLevel( messageLevel )
nextMessageIndex = self.__findNextMessage( messageLevel, reverse = False, wrap = wrap )
self.__scrollToMessage( nextMessageIndex, select )
def scrollToPreviousMessage( self, messageLevel, select = True, wrap = True ) :
assert( isinstance( messageLevel, IECore.MessageHandler.Level ) )
self.setFocusSearchResults( False )
if messageLevel > self.getMessageLevel() :
self.setMessageLevel( messageLevel )
prevMessageIndex = self.__findNextMessage( messageLevel, reverse = True, wrap = wrap )
self.__scrollToMessage( prevMessageIndex, select )
def isFollowingMessages( self ) :
return not self.__userScrollPosition
def isFollowingMessagesChangedSignal( self ) :
return self.__isFollowingMessagesChangedSignal
def scrollToLatest( self, *unused ) :
self.__userSetScrollPosition( False )
self.__scrollToBottom()
__eventLevelShortcuts = {
"E" : IECore.MessageHandler.Level.Error,
"W" : IECore.MessageHandler.Level.Warning,
"I" : IECore.MessageHandler.Level.Info,
"D" : IECore.MessageHandler.Level.Debug,
}
def eventNavigationShortcuts( self ) :
return { v : k for k, v in self.__eventLevelShortcuts.items() }
# Internal
def __setupSignals( self ) :
self.__messagesChangedSignal = GafferUI.WidgetSignal()
self.__searchResultsChangedSignal = GafferUI.WidgetSignal()
self.__searchTextChangedSignal = GafferUI.WidgetSignal()
self.__focusSearchResultsChangedSignal = GafferUI.WidgetSignal()
self.__messageLevelChangedSignal = GafferUI.WidgetSignal()
self.__isFollowingMessagesChangedSignal = GafferUI.WidgetSignal()
def __setupModels( self, expandRows ) :
self.__model = _MessageTableModel()
self.__filterModel = _MessageTableFilterModel()
self.__filterModel.setSourceModel( self.__model )
self.__searchModel = QtCore.QSortFilterProxyModel()
self.__searchModel.setFilterCaseSensitivity( QtCore.Qt.CaseInsensitive )
self.__searchModel.setFilterKeyColumn( -1 )
self.__searchModel.setSourceModel( self.__filterModel )
searchResultsChangedSlot = Gaffer.WeakMethod( self.__searchResultsChanged )
self.__searchModel.rowsInserted.connect( searchResultsChangedSlot )
self.__searchModel.rowsRemoved.connect( searchResultsChangedSlot )
self.__searchModel.dataChanged.connect( searchResultsChangedSlot )
self.__searchModel.modelReset.connect( searchResultsChangedSlot )
# QSortFilterProxyModel doesn't support a transparent get/set (as it goes via regex)
self.__searchText = ""
self.__displayTransform = _MessageTableExpandedViewProxy() if expandRows else _MessageTableCollapseColumnsProxy()
self.__setDisplayModel( self.__filterModel )
self._qtWidget().setModel( self.__displayTransform )
def __setupAppearance( self, expandRows ) :
tableView = self._qtWidget()
tableView.setEditTriggers( tableView.NoEditTriggers )
tableView.setSelectionBehavior( QtWidgets.QAbstractItemView.SelectRows )
tableView.setSelectionMode( QtWidgets.QAbstractItemView.ContiguousSelection )
tableView.verticalHeader().setVisible( False )
tableView.horizontalHeader().setVisible( False )
tableView.setHorizontalScrollMode( tableView.ScrollPerPixel )
tableView.setShowGrid( False )
if expandRows :
tableView.horizontalHeader().setSectionResizeMode( 0, QtWidgets.QHeaderView.Stretch )
tableView.verticalHeader().setSectionResizeMode( QtWidgets.QHeaderView.ResizeToContents )
tableView.setWordWrap( True )
else :
tableView.verticalHeader().setSectionResizeMode( QtWidgets.QHeaderView.Fixed )
tableView.verticalHeader().setDefaultSectionSize( 14 )
# Fortunately we have a fixed set of known message levels so it's OK to hard-code this here
tableView.setColumnWidth( 0, 75 )
tableView.horizontalHeader().setStretchLastSection( True )
tableView.setHorizontalScrollBarPolicy( QtCore.Qt.ScrollBarAsNeeded )
tableView.setWordWrap( False )
def __setDisplayModel( self, model ) :
self.__displayModel = model
self.__displayTransform.setSourceModel( model )
#
# A display index refers to indices into self.__displayModel, before any self.__displayTransform
#
def __displayIndexForMessage( self, messageIndex ) :
displayIndex = self.__filterModel.mapFromSource( self.__model.index( messageIndex, 0 ) )
if self.__displayModel != self.__filterModel :
displayIndex = self.__displayModel.mapFromSource( displayIndex )
return displayIndex
# Selection
def __selectedDisplayIndexes( self ) :
displayIndexes = self._qtWidget().selectedIndexes()
if self.__displayTransform is not None :
displayIndexes = [ self.__displayTransform.mapToSource(i) for i in displayIndexes ]
return displayIndexes
def __selectDisplayIndex( self, index ) :
if not index.isValid() :
return
selectionMode = QtCore.QItemSelectionModel.ClearAndSelect | QtCore.QItemSelectionModel.Rows
selection = self._qtWidget().selectionModel()
if self.__displayTransform is not None :
# Expand the selection to make sure we have the whole row as the transform may map columns to rows
row = index.row()
lastColumn = index.model().columnCount() - 1
rowSelection = QtCore.QItemSelection( index.sibling( row, 0 ), index.sibling( row, lastColumn ) )
selection.select( self.__displayTransform.mapSelectionFromSource( rowSelection ), selectionMode )
else :
selection.select( index, selectionMode )
def __selectedMessageIndices( self ) :
# Gets back to the base model index, whose indices equate to the actual message container indices.
def messageModelIndex( index ) :
model = self.__displayModel
while hasattr( model, "sourceModel" ) :
index = model.mapToSource( index )
model = model.sourceModel()
return index
# remove duplicates, either due to the expanded display model, or multi-column selection
return sorted( { messageModelIndex( i ).row() for i in self.__selectedDisplayIndexes() } )
# Scrolling
def __scrollToDisplayIndex( self, index ) :
if not index.isValid() :
return
if self.__displayTransform is not None :
index = self.__displayTransform.mapFromSource( index )
self._qtWidget().scrollTo( index )
def __scrollToBottom( self ) :
self.__didInitiateScroll = True
self._qtWidget().scrollToBottom()
self.__didInitiateScroll = False
def __scrollToMessage( self, messageIndex, select ) :
if messageIndex is None :
return
displayIndex = self.__displayIndexForMessage( messageIndex )
if displayIndex.isValid() :
self.__scrollToDisplayIndex( displayIndex )
self.__userSetScrollPosition( True )
if select :
self.__selectDisplayIndex( displayIndex )
# Search result management
def __searchResultsChanged( self, *unused ) :
self.__searchResultsChangedSignal( self )
def __navigateSearchResult( self, previous = False ) :
if self.searchResultCount() == 0 :
return
selected = self.__selectedDisplayIndexes()
selectedIndex = selected[0] if selected else None
if selectedIndex is None :
row = ( self.__searchModel.rowCount() - 1 ) if previous else 0
resultIndex = self.__searchModel.mapToSource( self.__searchModel.index( row, 0 ) )
else :
resultIndex = self.__adjacentSearchResultDisplayIndex( selectedIndex, previous )
if resultIndex is not None :
self.__scrollToDisplayIndex( resultIndex )
self.__selectDisplayIndex( resultIndex )
def __adjacentSearchResultDisplayIndex( self, currentDisplayIndex, previous ) :
displayIsSearchModel = currentDisplayIndex.model() == self.__searchModel
if displayIsSearchModel :
currentResult = currentDisplayIndex
else :
currentResult = self.__searchModel.mapFromSource( currentDisplayIndex )
result = None
if currentResult.isValid() :
# If the selected row is already a search result, we simply increment/decrement to the next
currentRow = currentResult.row()
if previous :
if currentRow > 0 :
result = currentResult.sibling( currentRow - 1, 0 )
else :
if currentRow < currentResult.model().rowCount() - 1 :
result = currentResult.sibling( currentRow + 1, 0 )
else :
# Find the nearest result
result = self.__findNearestSearchResult( currentDisplayIndex, previous )
if result is not None :
return result if displayIsSearchModel else self.__searchModel.mapToSource( result )
# Exposes a proxy model's source rows via the sequence interface
class __ModelToSourceRowsWrapper() :
def __init__( self, searchModel ) :
self.__model = searchModel
def __len__( self ) :
return self.__model.rowCount()
def __getitem__( self, index ) :
return self.__model.mapToSource( self.__model.index( index, 0 ) ).row()
def __findNearestSearchResult( self, displayIndex, before = False ) :
model = self.__searchModel
selectedDisplayRow = displayIndex.row()
# As bisect needs a sequence type, but we don't want to pre-generate a list of all result
# source rows, we wrap the model in a class that will convert the lookups to our source rows.
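# For illustration (hypothetical rows) : if the search results map to source
# rows [ 2, 5, 9 ] and the selected display row is 6, bisect_right returns 2
# (the next result, at source row 9), and bisect_left - 1 returns 1 (the
# previous result, at source row 5).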
bisectable = _MessageTableView.__ModelToSourceRowsWrapper( model )
if before :
nearest = bisect.bisect_left( bisectable, selectedDisplayRow ) - 1
else :
nearest = bisect.bisect_right( bisectable, selectedDisplayRow )
if nearest < 0 or nearest == model.rowCount() :
return None
return model.index( nearest, 0 )
# Message navigation
def __findNextMessage( self, messageLevel, reverse = False, wrap = True ) :
lastIndex = len( self.__model.getMessages() ) - 1
searchStart = lastIndex if reverse else 0
selected = self.__selectedMessageIndices()
if selected :
i = selected[0]
searchStart = ( i - 1 ) if reverse else ( i + 1 )
nextMessageIndex = self.__nextMessageIndex( messageLevel, searchStart, 0 if reverse else lastIndex )
if nextMessageIndex is None and selected and wrap :
nextMessageIndex = self.__nextMessageIndex( messageLevel, lastIndex if reverse else 0, searchStart )
return nextMessageIndex
def __nextMessageIndex( self, messageLevel, startIndex, endIndex ) :
reverse = startIndex > endIndex
step = -1 if reverse else 1
rangeEnd = endIndex + step
messages = self.__model.getMessages()
if startIndex < 0 or startIndex >= len(messages) :
return None
for i in range( startIndex, rangeEnd, step ) :
if messages[i].level == messageLevel :
return i
return None
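# Example with hypothetical indices : given 10 messages and a selection at index 4,
# scrolling forwards searches indices 5..9 first and, if nothing matches and wrap is
# enabled, continues from index 0 back up to the start point.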
# Auto-follow
def __setFollowMessages( self, follow ) :
self.__followMessages = follow
if self.__followMessages :
slot = Gaffer.WeakMethod( self.__vScrollBarValueChanged )
self._qtWidget().verticalScrollBar().valueChanged.connect( slot )
def __scrollIfNeeded( self ) :
if not self.__followMessages :
return
if self.__userScrollPosition :
return
self.__scrollToBottom()
def __vScrollBarValueChanged( self, value ) :
if self.__didInitiateScroll :
return
if ( self._qtWidget().verticalScrollBar().maximum() - value ) == 0 :
self.__userSetScrollPosition( False )
else :
self.__userSetScrollPosition( True )
def __userSetScrollPosition( self, didSet ) :
self.__userScrollPosition = didSet
self.__isFollowingMessagesChangedSignal( self )
# Keyboard shortcuts
def __keyPress( self, unused, event ) :
if event.key == "C" and event.modifiers == event.Modifiers.Control :
self.__copySelectedRows()
return True
elif event.key == "A" and event.modifiers == event.Modifiers.Control :
self._qtWidget().selectAll()
return True
elif event.key == "F" and event.modifiers == event.Modifiers.Control and self.__searchWidget is not None :
self.__searchWidget.grabFocus()
return True
elif event.key in self.__eventLevelShortcuts :
if event.modifiers == event.Modifiers.None_ :
self.scrollToNextMessage( self.__eventLevelShortcuts[ event.key ] )
return True
elif event.modifiers == event.Modifiers.Shift :
self.scrollToPreviousMessage( self.__eventLevelShortcuts[ event.key ] )
return True
elif event.key == "S" and event.modifiers == event.Modifiers.None_ :
self.setFocusSearchResults( not self.getFocusSearchResults() )
return True
elif event.key == "P" and event.modifiers == event.Modifiers.None_ :
self.scrollToPreviousSearchResult()
return True
elif event.key == "N" and event.modifiers == event.Modifiers.None_ :
self.scrollToNextSearchResult()
return True
elif event.key in ( "End", "B" ) and event.modifiers == event.Modifiers.None_ :
self.__userSetScrollPosition( False )
self.__scrollToBottom()
return True
return False
# Copy/Paste
def __copySelectedRows( self ) :
# TODO only selected, can we get something for free from Qt?
messageIndices = self.__selectedMessageIndices()
text = self.__plainTextForMessages( messageIndices )
QtWidgets.QApplication.clipboard().setText( text )
def __plainTextForMessages( self, messageIndices ) :
messages = self.getMessages()
indices = messageIndices or range( len(messages) )
text = ""
for i in indices :
m = messages[ i ]
text += "%s [%s] %s\n" % ( str(m.level).ljust(7).upper(), m.context, m.message )
return text
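# __plainTextForMessages produces one line per message in the form
# "<LEVEL> [<context>] <message>", e.g. (hypothetical message)
# "WARNING [Render] missing texture".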
# Combines context and message to work around column sizing issues. Asking the table view
# to autoresize sections is prohibitively slow for the rate at which we receive messages.
# Having context as a separate column consequently requires either a fixed width, specified
# by the parent UI, or truncated contents - neither of which is ideal. This proxy combines
# context/message such that we don't have to worry about how long the context string is.
# It would save some boilerplate if we derived from QIdentityProxyModel (though strictly, this
# wouldn't be an identity proxy), but it is missing from the bindings.
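# Roughly, the column mapping is (hypothetical data) :
#
#   source : | Level   | Context | Message         |
#            | WARNING | Render  | missing texture |
#   proxy  : | Level   | Context : Message         |
#            | WARNING | Render : missing texture  |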
class _MessageTableCollapseColumnsProxy( QtCore.QAbstractProxyModel ) :
def columnCount( self, parent ) :
return 2
def rowCount( self, parent ) :
return self.sourceModel().rowCount()
def mapFromSource( self, sourceIndex ):
if not sourceIndex.isValid() or sourceIndex.row() < 0 :
return QtCore.QModelIndex()
# This does double up on indexes, but means you get the
# correct mapping for selection rectangles.
if sourceIndex.column() == 2 :
return self.index( sourceIndex.row(), 1 )
else :
return self.index( sourceIndex.row(), sourceIndex.column() )
def mapToSource( self, proxyIndex ) :
if not proxyIndex.isValid() or proxyIndex.row() < 0 :
return QtCore.QModelIndex()
if proxyIndex.column() == 1 :
return self.sourceModel().index( proxyIndex.row(), 2 )
else :
return self.sourceModel().index( proxyIndex.row(), proxyIndex.column() )
def data( self, index, role = QtCore.Qt.DisplayRole ) :
sourceModel = self.sourceModel()
if index.column() == 1 and role == QtCore.Qt.DisplayRole :
contextIndex = sourceModel.index( index.row(), 1 )
messageIndex = sourceModel.index( index.row(), 2 )
return "%s : %s" % (
sourceModel.data( contextIndex, role ),
sourceModel.data( messageIndex, role )
)
return sourceModel.data( self.mapToSource( index ), role )
def setSourceModel( self, model ) :
oldModel = self.sourceModel()
# We don't encounter column changes so we don't need to bother with those signals here.
# We don't have to worry about parent because it's always invalid - the model isn't a tree.
for signal in (
"modelReset", "rowsAboutToBeInserted", "rowsInserted", "rowsAboutToBeRemoved", "rowsRemoved"
) :
slot = getattr( self, signal )
if oldModel :
getattr( oldModel, signal ).disconnect( slot )
if model :
getattr( model, signal ).connect( slot )
if oldModel :
oldModel.dataChanged.disconnect( self.__dataChanged )
if model :
model.dataChanged.connect( self.__dataChanged )
self.beginResetModel()
QtCore.QAbstractProxyModel.setSourceModel( self, model )
self.endResetModel()
def index( self, row, column, parent = QtCore.QModelIndex() ) :
if parent.isValid() :
return QtCore.QModelIndex()
return self.createIndex( row, column )
def parent( self, index ) :
return QtCore.QModelIndex()
def __dataChanged( self, topLeft, bottomRight, roles ) :
self.dataChanged.emit( self.mapFromSource( topLeft ), self.mapFromSource( bottomRight ), roles )
# Expands messages into a two-row presentation, with level + context on one
# row, and the body of the message on the next.
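# Source row i maps to proxy rows 2i and 2i + 1, so a hypothetical warning appears
# as "WARNING: Render" on the first row and the message body on the second.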
class _MessageTableExpandedViewProxy( QtCore.QAbstractProxyModel ) :
def columnCount( self, parent ) :
return 1
def rowCount( self, parent ) :
if parent.isValid() :
return 0
return 2 * self.sourceModel().rowCount() if self.sourceModel() else 0
def mapFromSource( self, sourceIndex ) :
if not sourceIndex.isValid() or sourceIndex.row() < 0 :
return QtCore.QModelIndex()
row = sourceIndex.row() * 2
if sourceIndex.column() == int( _MessageTableModel.Column.Message ) :
row += 1
return self.index( row, 0 )
def mapToSource( self, proxyIndex ) :
if not proxyIndex.isValid() or proxyIndex.row() < 0 :
return QtCore.QModelIndex()
if proxyIndex.row() % 2 == 0 :
column = _MessageTableModel.Column.Level
else :
column = _MessageTableModel.Column.Message
return self.sourceModel().index( proxyIndex.row() // 2, int(column) )
def data( self, index, role = QtCore.Qt.DisplayRole ) :
source = self.sourceModel()
sourceIndex = self.mapToSource( index )
# We combine the level/context columns into one row, and the message into another
if index.row() % 2 == 0 :
levelIndex = source.index( sourceIndex.row(), 0 )
if role == QtCore.Qt.DisplayRole :
# Combine level/context
contextIndex = source.index( sourceIndex.row(), 1 )
return "%s: %s" % ( source.data( levelIndex, role ), source.data( contextIndex, role ) )
elif role == QtCore.Qt.ForegroundRole :
# In expanded mode, only colourise the header
return self.__headerColor( source.data( levelIndex, _MessageTableModel.ValueRole ) )
else :
if role == QtCore.Qt.DisplayRole :
# Add a new line to separate messages out
return "%s\n" % source.data( sourceIndex, role )
elif role == QtCore.Qt.ForegroundRole :
return GafferUI._Variant.toVariant( GafferUI._StyleSheet.styleColor( "foreground" ) )
return source.data( sourceIndex, role )
def index( self, row, column, parent = QtCore.QModelIndex() ) :
if parent.isValid() :
return QtCore.QModelIndex()
return self.createIndex( row, column )
def parent( self, index ) :
return QtCore.QModelIndex()
def setSourceModel( self, model ) :
oldModel = self.sourceModel()
# We don't encounter column changes so we don't need to bother with those signals here.
for signal in (
"dataChanged", "modelReset",
"rowsAboutToBeInserted", "rowsInserted", "rowsAboutToBeRemoved", "rowsRemoved"
) :
slot = getattr( self, "_MessageTableExpandedViewProxy__" + signal )
if oldModel :
getattr( oldModel, signal ).disconnect( slot )
if model :
getattr( model, signal ).connect( slot )
self.beginResetModel()
QtCore.QAbstractProxyModel.setSourceModel( self, model )
self.endResetModel()
def __headerColor( self, levelData ) :
# Sadly, as QAbstractProxyModel is, well, abstract, we can't add a constructor of our own
# as Python will complain we haven't called the base constructor. Uses a single underscore
# prefix to avoid name-mangling fun.
if not hasattr( self, "_colorMap" ) :
self._colorMap = {}
for l in _messageLevels :
self._colorMap[ int(l) ] = GafferUI._Variant.toVariant( GafferUI._StyleSheet.styleColor( "foreground%s" % l ) )
return self._colorMap[ levelData ]
# Signal forwarding.
def __dataChanged( self, topLeft, bottomRight, roles ) :
self.dataChanged.emit( self.index( topLeft.row() * 2, 0 ), self.index( ( bottomRight.row() * 2 ) + 1, 0 ), roles )
def __modelReset( self ) :
self.modelReset.emit()
def __rowsAboutToBeInserted( self, parent, start, end ) :
self.rowsAboutToBeInserted.emit( QtCore.QModelIndex(), start * 2, end * 2 + 1 )
def __rowsInserted( self, parent, start, end ) :
self.rowsInserted.emit( QtCore.QModelIndex(), start * 2, end * 2 + 1 )
def __rowsAboutToBeRemoved( self, parent, start, end ) :
self.rowsAboutToBeRemoved.emit( QtCore.QModelIndex(), start * 2, end * 2 + 1 )
def __rowsRemoved( self, parent, start, end ) :
self.rowsRemoved.emit( QtCore.QModelIndex(), start * 2, end * 2 + 1 )
# Provides filtering based on message level. This isn't directly used for search
# filtering, as we often want search not to affect the number of messages displayed.
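# For example, with the level set to Info, Debug rows are hidden while Error,
# Warning and Info rows pass the filter (this assumes level values increase with
# verbosity, as they do for IECore.MessageHandler.Level).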
class _MessageTableFilterModel( QtCore.QSortFilterProxyModel ) :
def __init__( self, level = IECore.MessageHandler.Level.Info, *kw ) :
QtCore.QSortFilterProxyModel.__init__( self, *kw )
self.setLevel( level )
def setLevel( self, level ) :
self.__maxLevel = level
self.invalidateFilter()
def getLevel( self ) :
return self.__maxLevel
# Overrides for methods inherited from QSortFilterProxyModel
# --------------------------------------------------------
def filterAcceptsRow( self, sourceRow, sourceParent ) :
levelIndex = self.sourceModel().index( sourceRow, _MessageTableModel.Column.Level, sourceParent )
return self.sourceModel().data( levelIndex, _MessageTableModel.ValueRole ) <= self.__maxLevel
# The base TableModel representing the underlying message data.
class _MessageTableModel( QtCore.QAbstractTableModel ) :
ColumnCount = 3
Column = IECore.Enum.create( "Level", "Context", "Message" )
# A role to allow access to the underlying Message data, without any display coercion.
ValueRole = 100
def __init__( self, messages = None, parent = None ) :
QtCore.QAbstractTableModel.__init__( self, parent )
self.__messages = messages
def setMessages( self, messages ) :
# We make use of existing rows here rather than resetting
# the model as it avoids flickering where the view first
# scrolls to the top, and then is re-scrolled back to the
# bottom.
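# For example, growing from 10 to 12 messages emits rowsInserted( 10, 11 ) rather
# than a full model reset, followed by a dataChanged for any rows that differ.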
firstDifference = messages.firstDifference( self.__messages ) if self.__messages is not None else 0
numRows = len( self.__messages ) if self.__messages else 0
targetNumRows = len( messages )
if targetNumRows > numRows :
self.beginInsertRows( QtCore.QModelIndex(), numRows, targetNumRows - 1 )
self.__messages = messages
self.endInsertRows()
elif targetNumRows < numRows :
self.beginRemoveRows( QtCore.QModelIndex(), targetNumRows, numRows - 1 )
self.__messages = messages
self.endRemoveRows()
else :
self.__messages = messages
if targetNumRows > 0 :
lastRowIndex = targetNumRows - 1
if firstDifference is not None :
self.dataChanged.emit(
self.index( firstDifference, 0 ),
self.index( lastRowIndex, self.columnCount() - 1 )
)
def getMessages( self ) :
return self.__messages
def addMessage( self, message ) :
nextIndex = self.rowCount()
self.beginInsertRows( QtCore.QModelIndex(), nextIndex, nextIndex )
self.__messages.add( message )
self.endInsertRows()
# Overrides for methods inherited from QAbstractTableModel
# --------------------------------------------------------
def rowCount( self, parent = QtCore.QModelIndex() ) :
if parent.isValid() :
return 0
if self.__messages is None :
return 0
return len( self.__messages )
def columnCount( self, parent = QtCore.QModelIndex() ) :
if parent.isValid() :
return 0
return _MessageTableModel.ColumnCount
def headerData( self, section, orientation, role ) :
if role == QtCore.Qt.DisplayRole and orientation == QtCore.Qt.Horizontal :
return str( _MessageTableModel.Column( section ) )
def flags( self, index ) :
return QtCore.Qt.ItemIsSelectable | QtCore.Qt.ItemIsEnabled
def data( self, index, role ) :
if not index.isValid() :
return None
if role == QtCore.Qt.DisplayRole or role == _MessageTableModel.ValueRole :
message = self.__messages[ index.row() ]
if index.column() == int( _MessageTableModel.Column.Level ) :
return str(message.level).upper() if role == QtCore.Qt.DisplayRole else message.level
elif index.column() == int( _MessageTableModel.Column.Context ) :
return message.context
elif index.column() == int( _MessageTableModel.Column.Message ) :
return message.message
elif role == QtCore.Qt.ForegroundRole :
message = self.__messages[ index.row() ]
# Keep info level messages white
suffix = "" if message.level == IECore.MessageHandler.Level.Info else str( message.level )
return GafferUI._Variant.toVariant( GafferUI._StyleSheet.styleColor( "foreground%s" % suffix ) )
| {
"content_hash": "88e8af0db02c29aaacfcc107135ba06f",
"timestamp": "",
"source": "github",
"line_count": 1453,
"max_line_length": 152,
"avg_line_length": 32.68066070199587,
"alnum_prop": 0.718732231230915,
"repo_name": "boberfly/gaffer",
"id": "e08eb19ec00acb59091fe6fc7e9675d12581e536",
"size": "49288",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "python/GafferUI/MessageWidget.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "41979"
},
{
"name": "C++",
"bytes": "7646009"
},
{
"name": "CMake",
"bytes": "85201"
},
{
"name": "GLSL",
"bytes": "6236"
},
{
"name": "Python",
"bytes": "8002810"
},
{
"name": "Shell",
"bytes": "15031"
}
],
"symlink_target": ""
} |
from setuptools import setup
VERSION = '0.26'
if __name__ == '__main__':
setup(
name='biofits',
packages=['biofits'],
version=VERSION,
description='Common biochemical data fitting functions',
author='Jim Rybarski',
author_email='[email protected]',
url='https://github.com/jimrybarski/biofits',
download_url='https://github.com/jimrybarski/biofits/tarball/%s' % VERSION,
keywords=['biology', 'biochemistry', 'kinetics', 'fitting'],
classifiers=[
    'Development Status :: 4 - Beta',
    'Intended Audience :: Science/Research',
    'License :: Freely Distributable',
    'License :: OSI Approved :: MIT License',
    'Programming Language :: Python :: 2',
    'Programming Language :: Python :: 3',
]
)
| {
"content_hash": "0a2b9db17ac80ab296f6fd3b9ba3b8f5",
"timestamp": "",
"source": "github",
"line_count": 17,
"max_line_length": 250,
"avg_line_length": 45.05882352941177,
"alnum_prop": 0.6331592689295039,
"repo_name": "jimrybarski/biofits",
"id": "7682c8525b29705e7b21b3346fe37abc1a27d168",
"size": "766",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "8577"
}
],
"symlink_target": ""
} |
import sys
import os
from setuptools import setup
import wb_scm_version
import py2app
min_py2app = [0, 11] # 0.11
py2app_version = [int(part) for part in py2app.__version__.split('.')]
if py2app_version < min_py2app:
raise ValueError( 'build requires at least version %d.%d of py2app' % tuple(min_py2app) )
v = wb_scm_version.__dict__
if sys.argv[1] == '--package':
del sys.argv[1]
else:
v.APP_NAME = v.APP_NAME + '-Devel'
short_version = '%(major)d.%(minor)d.%(patch)d' % v
info_string = '%(APP_NAME)s %(major)d.%(minor)d.%(patch)d %(commit)s ©%(copyright_years)s Barry A. Scott. All Rights Reserved.' % v
setup(
app =
['%s/Source/Scm/wb_scm_main.py' % (os.environ['BUILDER_TOP_DIR'],)],
data_files =
[],
options =
{'py2app':
{
'argv_emulation':
False,
'no_chdir':
True,
'iconfile':
'%s/wb.icns' % (os.environ['DIST_DIR'],),
'plist':
dict(
CFBundleIdentifier='%(APP_ID)s' % v,
CFBundleName='%(APP_NAME)s' % v,
CFBundleVersion=short_version,
CFBundleShortVersionString=short_version,
CFBundleGetInfoString=info_string,
# claim we know about dark mode
NSRequiresAquaSystemAppearance='false',
),
}},
setup_requires =
['py2app'],
)
| {
"content_hash": "54ba6bdac5163e9c8a85f30b84b5cff0",
"timestamp": "",
"source": "github",
"line_count": 52,
"max_line_length": 131,
"avg_line_length": 28.78846153846154,
"alnum_prop": 0.5170340681362725,
"repo_name": "barry-scott/scm-workbench",
"id": "dfa538ac4b8ef0f649bd0ddf6f9f025df41d7ddb",
"size": "1534",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "Source/build_macos_py2app_setup.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "9373"
},
{
"name": "HTML",
"bytes": "419028"
},
{
"name": "Inno Setup",
"bytes": "2599"
},
{
"name": "Makefile",
"bytes": "1079"
},
{
"name": "Python",
"bytes": "987917"
},
{
"name": "Roff",
"bytes": "460"
},
{
"name": "Shell",
"bytes": "30078"
}
],
"symlink_target": ""
} |
import hashlib
from webapp.web import BaseHandler
from model import dbapi
class RegisterHandler(BaseHandler):
def check_xsrf(self):
if self.check_xsrf_cookie() == False:
return False
return True
def get(self, error=""):
xsrf_token = self.xsrf_from_html()
params = {'error_info': error, 'xsrf_token': xsrf_token}
body = self.wrap_html('register.html', params)
return self.write(body)
def post(self):
if self.check_xsrf() == False:
error = "xsrf invalid"
return self.get(error)
email = self.get_arg('email')
email = email.strip()
password = self.get_arg('password')
password2 = self.get_arg('password2')
user = dbapi.User()
error = ""
if email and password == password2:
if user.get_user(email) == 0:
error = "user already exist"
else:
result = user.insert_user(email, password)
if result != -1:
#self.set_secure_cookie('email', str(email))
# changed to session
m = hashlib.md5()
m.update(email)
email_md5 = m.hexdigest()
self.session["email"] = email
self.session["email_md5"] = email_md5
self.set_secure_cookie('sid', self.session.session_id)
return self.redirect("/user")
else:
error = "insert falure, try again later"
else:
if password != password2:
error = "password inconsistent"
else:
error = "missing argument"
return self.get(error)
| {
"content_hash": "2664d4c7b3bd0b01d801a82c88b4aa9e",
"timestamp": "",
"source": "github",
"line_count": 55,
"max_line_length": 74,
"avg_line_length": 32.127272727272725,
"alnum_prop": 0.5070741369552915,
"repo_name": "vincentpc/yagra_for_wsgi",
"id": "79166b57a82592f23a7f8b94fd44aa0d57d17b17",
"size": "1805",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "handlers/register.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "2421"
},
{
"name": "JavaScript",
"bytes": "1733"
},
{
"name": "Python",
"bytes": "41247"
},
{
"name": "Shell",
"bytes": "6699"
}
],
"symlink_target": ""
} |
import copy
from datetime import datetime
from django.core.management.color import no_style
from django.core.management.sql import sql_create, sql_delete, sql_indexes
from django.db import connection, transaction, settings, models
from django.db.backends.util import truncate_name
from django.db.models.loading import cache
from django_evolution import signature
from django_evolution.tests import models as evo_test
from django_evolution.utils import write_sql, execute_sql
from django.contrib.contenttypes import models as contenttypes
DEFAULT_TEST_ATTRIBUTE_VALUES = {
models.CharField: 'TestCharField',
models.IntegerField: '123',
models.AutoField: None,
models.DateTimeField: datetime.now(),
models.PositiveIntegerField: '42'
}
def register_models(*models):
app_cache = {}
for name, model in models:
if model._meta.module_name in cache.app_models['django_evolution']:
del cache.app_models['django_evolution'][model._meta.module_name]
if model._meta.db_table.startswith("%s_%s" % (model._meta.app_label,
model._meta.module_name)):
model._meta.db_table = 'tests_%s' % name.lower()
model._meta.app_label = 'tests'
model._meta.object_name = name
model._meta.module_name = name.lower()
cache.app_models.setdefault('tests', {})[name.lower()] = model
app_cache[name.lower()] = model
return app_cache
def test_proj_sig(*models, **kwargs):
"Generate a dummy project signature based around a single model"
version = kwargs.get('version',1)
proj_sig = {
'tests': {},
'__version__': version,
}
# Compute the project signature
for full_name,model in models:
parts = full_name.split('.')
if len(parts) == 1:
name = parts[0]
app = 'tests'
else:
app,name = parts
proj_sig.setdefault(app,{})[name] = signature.create_model_sig(model)
return proj_sig
def execute_transaction(sql, output=False):
"A transaction wrapper for executing a list of SQL statements"
try:
# Begin Transaction
transaction.enter_transaction_management()
transaction.managed(True)
cursor = connection.cursor()
# Perform the SQL
if output:
write_sql(sql)
execute_sql(cursor, sql)
transaction.commit()
transaction.leave_transaction_management()
except Exception, ex:
transaction.rollback()
raise ex
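# A minimal usage sketch (hypothetical statement) :
# execute_transaction(['ALTER TABLE "tests_testmodel" ADD COLUMN "foo" integer;'], output=True)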
def execute_test_sql(start, end, sql, debug=False):
"""
Execute a test SQL sequence. This method also creates and destroys the
database tables required by the models registered against the test application.
start and end are the start- and end-point states of the application cache.
sql is the list of sql statements to execute.
cleanup is a list of extra sql statements required to clean up. This is
primarily for any extra m2m tables that were added during a test that won't
be cleaned up by Django's sql_delete() implementation.
debug is a helper flag. It displays ALL the SQL that would be executed (including
setup and teardown SQL), and executes the Django-derived setup/teardown SQL.
"""
# Set up the initial state of the app cache
cache.app_models['tests'] = copy.deepcopy(start)
# Install the initial tables and indicies
style = no_style()
execute_transaction(sql_create(evo_test, style), output=debug)
execute_transaction(sql_indexes(evo_test, style), output=debug)
create_test_data(models.get_models(evo_test))
# Set the app cache to the end state
cache.app_models['tests'] = copy.deepcopy(end)
try:
# Execute the test sql
if debug:
write_sql(sql)
else:
execute_transaction(sql, output=True)
finally:
# Cleanup the apps.
if debug:
print sql_delete(evo_test, style)
else:
execute_transaction(sql_delete(evo_test, style), output=debug)
def create_test_data(app_models):
deferred_models = []
deferred_fields = {}
for model in app_models:
params = {}
deferred = False
for field in model._meta.fields:
if not deferred:
if type(field) == models.ForeignKey or type(field) == models.ManyToManyField:
related_model = field.rel.to
if related_model.objects.count():
related_instance = related_model.objects.all()[0]
else:
if field.null == False:
# Field cannot be null yet the related object hasn't been created yet
# Defer the creation of this model
deferred = True
deferred_models.append(model)
else:
# Field cannot be set yet but null is acceptable for the moment
deferred_fields.setdefault(type(model), []).append(field)
related_instance = None
if not deferred:
if type(field) == models.ForeignKey:
params[field.name] = related_instance
else:
params[field.name] = [related_instance]
else:
params[field.name] = DEFAULT_TEST_ATTRIBUTE_VALUES[type(field)]
if not deferred:
model(**params).save()
# Create all deferred models.
if deferred_models:
create_test_data(deferred_models)
# All models should be created (Not all deferred fields have been populated yet)
# Populate deferred fields that we know about.
# Here lies untested code!
if deferred_fields:
for model, field_list in deferred_fields.items():
for field in field_list:
related_model = field.rel.to
related_instance = related_model.objects.all()[0]
if type(field) == models.ForeignKey:
setattr(model, field.name, related_instance)
else:
getattr(model, field.name).add(related_instance)
model.save()
def test_sql_mapping(test_field_name):
engine = settings.DATABASE_ENGINE
sql_for_engine = __import__('django_evolution.tests.db.%s' % (settings.DATABASE_ENGINE), {}, {}, [''])
return getattr(sql_for_engine, test_field_name)
def deregister_models():
"Clear the test section of the app cache"
del cache.app_models['tests']
| {
"content_hash": "63d9e2bc9e9580172ccb41ac46dd864f",
"timestamp": "",
"source": "github",
"line_count": 185,
"max_line_length": 109,
"avg_line_length": 37.35675675675676,
"alnum_prop": 0.59238894515989,
"repo_name": "broderboy/ai-stager",
"id": "6a6f1e0bc35763f5f52e7380605a9bc224829e06",
"size": "6911",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "stager/django_evolution/tests/utils.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "JavaScript",
"bytes": "176775"
},
{
"name": "Python",
"bytes": "320655"
},
{
"name": "Racket",
"bytes": "1101"
},
{
"name": "Shell",
"bytes": "640"
}
],
"symlink_target": ""
} |
{
'name' : 'Netherlands - Accounting',
'version' : '2.0',
'category': 'Localization',
'description': """
This is the module to manage the accounting chart for the Netherlands in Odoo.
=============================================================================
This module installs the standard chart of accounts as well as the Dutch tax codes and
fiscal positions for deliveries inside and outside the EU.
In the company settings you can configure the following:
- The number of digits of the chart of accounts.
  By default the chart of accounts has 6 digits; 4 is the minimum number of digits.
- Whether you want to use Anglo-Saxon accounting.
- The prefix of the bank accounts, by default 1100.
- The prefix of the cash account, by default 1000.
- The way of rounding the VAT.
""",
'author' : 'Veritos - Jan Verlaan',
'website' : 'http://www.veritos.nl',
'depends' : ['account',
'base_vat',
'base_iban',
],
'data' : ['account_chart_netherlands.xml',
"account_fiscal_position_template.xml",
"account_fiscal_position_tax_template.xml",
"account_fiscal_position_account_template.xml",
"l10n_nl_account_chart.yml",
],
'demo' : [],
'installable': True,
}
| {
"content_hash": "74919aed75517bf93634acf2eaddad84",
"timestamp": "",
"source": "github",
"line_count": 34,
"max_line_length": 86,
"avg_line_length": 37.911764705882355,
"alnum_prop": 0.6051202482544609,
"repo_name": "vileopratama/vitech",
"id": "f04033aa86a2d7567b666c06a6e7ac759d5146ac",
"size": "1452",
"binary": false,
"copies": "18",
"ref": "refs/heads/master",
"path": "src/addons/l10n_nl/__openerp__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "9611"
},
{
"name": "CSS",
"bytes": "2125999"
},
{
"name": "HTML",
"bytes": "252393"
},
{
"name": "Java",
"bytes": "1840167"
},
{
"name": "JavaScript",
"bytes": "6176224"
},
{
"name": "Makefile",
"bytes": "19072"
},
{
"name": "Mako",
"bytes": "7659"
},
{
"name": "NSIS",
"bytes": "16782"
},
{
"name": "Python",
"bytes": "9438805"
},
{
"name": "Ruby",
"bytes": "220"
},
{
"name": "Shell",
"bytes": "22312"
},
{
"name": "Vim script",
"bytes": "406"
},
{
"name": "XSLT",
"bytes": "11489"
}
],
"symlink_target": ""
} |
"""
Created on Thu Oct 5 07:43:59 2017
@author: ddboline
"""
from __future__ import (absolute_import, division, print_function, unicode_literals)
import os
import json
from dateutil.parser import parse
import datetime
from threading import Condition
import argparse
from trakt import Trakt
from movie_collection_app.movie_collection import MovieCollection
from movie_collection_app.parse_imdb import parse_imdb_mobile_tv, parse_imdb
list_of_commands = ('list', 'search', 'add', 'cal', 'rm')
help_text = 'commands=%s,[number]' % ','.join(list_of_commands)
def read_credentials():
credentials = {}
with open('%s/.trakt/credentials' % os.getenv('HOME')) as f:
for line in f:
tmp = line.split('=')
if len(tmp) > 1:
key, val = [x.strip() for x in tmp][:2]
credentials[key] = val
return credentials
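# read_credentials expects a plain key=value file; a hypothetical
# ~/.trakt/credentials might look like :
#
#   client_id = 0123456789abcdef
#   client_secret = fedcba9876543210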
class TraktInstance(object):
auth_token = '%s/.trakt/auth_token.json' % os.getenv('HOME')
def __init__(self, username='ddboline', mq_=None):
credentials = read_credentials()
self.username = username
self.client_id = credentials['client_id']
self.client_secret = credentials['client_secret']
self.trakt = Trakt.configuration.defaults.client(
id=self.client_id, secret=self.client_secret)
if mq_ is not None:
self.mq_ = mq_
else:
self.mq_ = MovieCollection()
self.imdb_show_map = {v['link']: k for k, v in self.mq_.imdb_ratings.items()}
self.is_authenticating = Condition()
# Bind trakt events
Trakt.on('oauth.token_refreshed', self.on_token_refreshed)
self.authorization = self.read_auth()
if self.authorization is None:
self.authenticate()
def authenticate(self):
if not self.is_authenticating.acquire(blocking=False):
print('Authentication has already been started')
return False
# Request new device code
code = Trakt['oauth/device'].code()
print('Enter the code "%s" at %s to authenticate your account' %
(code.get('user_code'), code.get('verification_url')))
# Construct device authentication poller
poller = Trakt['oauth/device'].poll(**code)\
.on('aborted', self.on_aborted)\
.on('authenticated', self.on_authenticated)\
.on('expired', self.on_expired)\
.on('poll', self.on_poll)
# Start polling for authentication token
poller.start(daemon=False)
# Wait for authentication to complete
result = self.is_authenticating.wait()
self.store_auth()
return result
def run(self):
if not self.authorization:
print('ERROR: Authentication required')
exit(1)
else:
print('authorization:', self.authorization)
with Trakt.configuration.oauth.from_response(self.authorization, refresh=True):
# Expired token will be refreshed automatically (as `refresh=True`)
print(Trakt['sync/watchlist'].shows(pagination=True))
def get_watchlist_shows(self):
with Trakt.configuration.oauth.from_response(self.authorization, refresh=True):
shows = Trakt['sync/watchlist'].shows(pagination=True)
if shows is None:
return {}
return {x.get_key('imdb'): x for x in shows.values()}
def get_watchlist_seasons(self):
with Trakt.configuration.oauth.from_response(self.authorization, refresh=True):
return Trakt['sync/watchlist'].seasons(pagination=True)
def get_watchlist_episodes(self):
with Trakt.configuration.oauth.from_response(self.authorization, refresh=True):
return Trakt['sync/watchlist'].episodes(pagination=True)
def get_watched_shows(self, imdb_id=None):
with Trakt.configuration.oauth.from_response(self.authorization, refresh=True):
results = {}
watched = Trakt['sync/watched'].shows(pagination=True)
if watched is None:
return {}
for show in watched.values():
title = show.title
imdb_url = show.get_key('imdb')
if imdb_id is not None and imdb_url != imdb_id:
continue
episodes = {}
for (season, epi), episode in show.episodes():
episodes[(season, epi)] = {
'title': title,
'imdb_url': imdb_url,
'season': season,
'episode': epi
}
results[imdb_url] = episodes
return results
def on_aborted(self):
"""Device authentication aborted.
Triggered when device authentication was aborted (either with `DeviceOAuthPoller.stop()`
or via the "poll" event)
"""
print('Authentication aborted')
# Authentication aborted
self.is_authenticating.acquire()
self.is_authenticating.notify_all()
self.is_authenticating.release()
def on_authenticated(self, authorization):
"""Device authenticated.
:param authorization: Authentication token details
:type authorization: dict
"""
# Acquire condition
self.is_authenticating.acquire()
# Store authorization for future calls
self.authorization = authorization
print('Authentication successful - authorization: %r' % self.authorization)
# Authentication complete
self.is_authenticating.notify_all()
self.is_authenticating.release()
def on_expired(self):
"""Device authentication expired."""
print('Authentication expired')
# Authentication expired
self.is_authenticating.acquire()
self.is_authenticating.notify_all()
self.is_authenticating.release()
def on_poll(self, callback):
"""Device authentication poll.
:param callback: Call with `True` to continue polling, or `False` to abort polling
:type callback: func
"""
# Continue polling
callback(True)
def on_token_refreshed(self, authorization):
# OAuth token refreshed, store authorization for future calls
self.authorization = authorization
print('Token refreshed - authorization: %r' % self.authorization)
def store_auth(self):
with open(self.auth_token, 'w') as f:
json.dump(self.authorization, f)
def read_auth(self):
if not os.path.exists(self.auth_token):
return None
with open(self.auth_token, 'r') as f:
return json.load(f)
def get_imdb_rating(self, show, imdb_url, type_='tv'):
if show in self.mq_.imdb_ratings:
return self.mq_.imdb_ratings[show]
show_ = show.replace('_', ' ')
title = None
if type_ == 'tv':
title, imdb_link, rating = parse_imdb_mobile_tv(show_, proxy=False)
else:
for title, imdb_link, rating in parse_imdb(show_, proxy=False):
if 'TV Series' not in title and 'TV Mini-Series' not in title:
break
if imdb_link != imdb_url:
print('Bad imdb link %s %s %s' % (show, imdb_link, imdb_url))
if title is None:
return {
'show': show,
'title': title,
'link': None,
'rating': -1,
'istv': False,
'index': -1
}
title = title.replace("'", '')
print(show, title, imdb_link, rating)
idx = list(self.mq_.con.execute("select max(index) from imdb_ratings"))
idx = idx[0][0]
row_dict = {
'show': show,
'title': title,
'link': imdb_link,
'rating': rating,
'istv': type_ == 'tv',
'index': idx + 1
}
self.mq_.imdb_ratings[show] = row_dict
keys, vals = zip(*row_dict.items())
self.mq_.con.execute("insert into imdb_ratings (%s) values ('%s')" %
(', '.join(keys), "', '".join('%s' % x for x in vals)))
return self.mq_.imdb_ratings[show]
def do_lookup(self, imdb_id):
with Trakt.configuration.oauth.from_response(self.authorization, refresh=True):
return Trakt['search'].lookup(id=imdb_id, service='imdb')
def do_query(self, show, media='show'):
with Trakt.configuration.oauth.from_response(self.authorization, refresh=True):
if show in self.mq_.imdb_ratings:
imdb = self.mq_.imdb_ratings[show]['link']
show = self.do_lookup(imdb_id=imdb)
return {imdb: show}
else:
shows = Trakt['search'].query(show.replace('_', ' '), media=media, pagination=True)
shows = {s.get_key('imdb'): s for s in shows}
return shows
def add_show_to_watchlist(self, show=None, imdb_id=None):
if imdb_id:
show_obj = self.do_lookup(imdb_id)
elif show:
show_obj = self.do_query(show)
if isinstance(show_obj, list):
if len(show_obj) < 1:
return
else:
show_obj = show_obj[0]
with Trakt.configuration.oauth.from_response(self.authorization, refresh=True):
items = {'shows': [show_obj.to_dict()]}
print(show_obj)
return Trakt['sync/watchlist'].add(items=items)
def add_episode_to_watched(self, show=None, imdb_id=None, season=None, episode=None):
if imdb_id:
show_obj = self.do_lookup(imdb_id)
elif show:
show_obj = self.do_query(show)
if isinstance(show_obj, list):
if len(show_obj) < 1:
return
else:
show_obj = show_obj[0]
if season and episode:
episode_ = Trakt['shows'].episode(
show_obj.get_key('imdb'), season=season, episode=episode)
if not episode_:
return False
with Trakt.configuration.oauth.from_response(self.authorization, refresh=True):
items = {'episodes': [episode_.to_dict()]}
print(episode_)
return Trakt['sync/history'].add(items=items)
elif season:
with Trakt.configuration.oauth.from_response(self.authorization, refresh=True):
episodes = Trakt['shows'].season(show_obj.get_key('imdb'), season=season)
if not episodes:
return False
episodes = [e.to_dict() for e in episodes]
items = {'episodes': episodes}
print(episodes)
return Trakt['sync/history'].add(items=items)
def remove_show_to_watchlist(self, show=None, imdb_id=None):
if imdb_id:
show_obj = self.do_lookup(imdb_id)
elif show:
show_obj = self.do_query(show)
if isinstance(show_obj, list):
if len(show_obj) < 1:
return
else:
show_obj = show_obj[0]
with Trakt.configuration.oauth.from_response(self.authorization, refresh=True):
items = {'shows': [show_obj.to_dict()]}
print(show_obj)
return Trakt['sync/watchlist'].remove(items=items)
def remove_episode_to_watched(self, show=None, imdb_id=None, season=None, episode=None):
if imdb_id:
show_obj = self.do_lookup(imdb_id)
elif show:
show_obj = self.do_query(show)
if isinstance(show_obj, list):
if len(show_obj) < 1:
return
else:
show_obj = show_obj[0]
if season and episode:
episode_ = Trakt['shows'].episode(
show_obj.get_key('imdb'), season=season, episode=episode)
with Trakt.configuration.oauth.from_response(self.authorization, refresh=True):
items = {'episodes': [episode_.to_dict()]}
print(episode_)
return Trakt['sync/history'].remove(items=items)
elif season:
with Trakt.configuration.oauth.from_response(self.authorization, refresh=True):
episodes = []
for episode_ in Trakt['shows'].season(show_obj.get_key('imdb'), season=season):
episodes.append(episode_.to_dict())
items = {'episodes': episodes}
print(episodes)
return Trakt['sync/history'].remove(items=items)
def add_movie_to_watched(self, title=None, imdb_id=None):
if imdb_id:
show_obj = self.do_lookup(imdb_id)
elif title:
show_obj = self.do_query(title)
if isinstance(show_obj, list):
if len(show_obj) < 1:
return
else:
show_obj = show_obj[0]
if isinstance(show_obj, dict):
if not show_obj:
return
show_obj = show_obj.values()[0]
print(show_obj)
with Trakt.configuration.oauth.from_response(self.authorization, refresh=True):
items = {'movies': [show_obj.to_dict()]}
print(show_obj)
return Trakt['sync/history'].add(items=items)
def get_calendar(self):
with Trakt.configuration.oauth.from_response(self.authorization, refresh=True):
return Trakt['calendars/my/*'].get(media='shows', pagination=True)
def print_trakt_cal_episode(self, args):
do_hulu = False
do_source = False
do_shows = False
do_trakt = False
maxdate = datetime.date.today() + datetime.timedelta(days=90)
for arg in args:
try:
maxdate = parse(arg).date()
continue
except (TypeError, ValueError):
pass
if arg in ('hulu', 'netflix', 'amazon'):
do_source = arg
elif arg == 'all':
do_source = arg
elif arg == 'trakt':
do_trakt = True
output = []
cal = self.get_calendar()
if cal is None:
return
for ep_ in cal:
show = ep_.show.title
season, episode = ep_.pk
airdate = ep_.first_aired.date()
if airdate > maxdate:
continue
airdate = airdate.isoformat()
imdb_url = ep_.show.get_key('imdb')
show = self.imdb_show_map.get(imdb_url, show)
if (do_source != 'all' and do_source in ('hulu', 'netflix', 'amazon') and
self.mq_.imdb_ratings.get(show, {}).get('source') != do_source):
continue
if (not do_source and self.mq_.imdb_ratings.get(
show, {}).get('source') in ('hulu', 'netflix', 'amazon')):
continue
eprating, rating = -1, -1
rating = self.mq_.imdb_ratings.get(show, {}).get('rating', -1)
title = self.mq_.imdb_ratings.get(show, {}).get('title', show)
eprating = self.mq_.imdb_episode_ratings.get(show, {}).get((season, episode), {}).get(
'rating', -1)
eptitle = self.mq_.imdb_episode_ratings.get(show, {}).get((season, episode), {}).get(
'eptitle', show)
output.append('%s %s %s %d %d %0.2f/%0.2f %s' % (show, title, eptitle, season, episode,
eprating, rating, airdate))
print('\n'.join(output))
def trakt_parse():
parser = argparse.ArgumentParser(description='find_new_episodes script')
parser.add_argument('command', nargs='*', help=help_text)
args = parser.parse_args()
_command = 'list'
_args = []
if hasattr(args, 'command'):
for arg in args.command:
if arg in list_of_commands:
_command = arg
else:
_args.append(arg)
ti_ = TraktInstance()
if _command == 'list':
if len(_args) == 0 or _args[0] == 'watchlist':
print('\n'.join('%s : %s' % (k, v) for k, v in ti_.get_watchlist_shows().items()))
elif _args[0] == 'watched':
if len(_args) > 1:
imdb = _args[1]
if imdb in ti_.mq_.imdb_ratings:
imdb = ti_.mq_.imdb_ratings[imdb]['link']
if len(_args) > 2:
print('\n'.join(
'%s : %s' % (k, v)
for k, v in sorted(ti_.get_watched_shows(imdb_id=imdb)[imdb].items())
if v['season'] == int(_args[2])))
else:
print('\n'.join('%s : %s' % (k, v) for k, v in sorted(
ti_.get_watched_shows(imdb_id=imdb).get(imdb, {}).items())))
else:
print('\n'.join('%s : %s %s' % (k, [x['title'] for x in v.values()][0], len(v))
for k, v in ti_.get_watched_shows().items()))
elif _command == 'search':
print('\n'.join(['%s %s' % (k, v) for k, v in ti_.do_query(_args[0]).items()]))
elif _command == 'add':
imdb = _args[1]
if imdb in ti_.mq_.imdb_ratings:
imdb = ti_.mq_.imdb_ratings[imdb]['link']
if _args[0] == 'watched':
if _args[1] == 'tv':
imdb = _args[2]
if imdb in ti_.mq_.imdb_ratings:
imdb = ti_.mq_.imdb_ratings[imdb]['link']
season, episodes = _args[3], [None]
if len(_args) > 4:
episodes = map(int, _args[4].split(','))
for episode in episodes:
print(season, episode)
print(ti_.do_lookup(imdb_id=imdb), season, episode)
print(ti_.add_episode_to_watched(imdb_id=imdb, season=season, episode=episode))
else:
print(ti_.add_movie_to_watched(imdb_id=imdb))
print(ti_.add_movie_to_watched(title=imdb))
elif _args[0] == 'watchlist':
print(ti_.add_show_to_watchlist(imdb_id=imdb))
elif _command == 'rm':
imdb = _args[1]
if imdb in ti_.mq_.imdb_ratings:
imdb = ti_.mq_.imdb_ratings[imdb]['link']
if _args[0] == 'watched':
if _args[1] == 'tv':
imdb = _args[2]
if imdb in ti_.mq_.imdb_ratings:
imdb = ti_.mq_.imdb_ratings[imdb]['link']
season, episode = _args[3], None
if len(_args) > 4:
episode = _args[4]
print(ti_.do_lookup(imdb_id=imdb), season, episode)
print(ti_.remove_episode_to_watched(imdb_id=imdb, season=season, episode=episode))
elif _args[0] == 'watchlist':
print(ti_.remove_show_to_watchlist(imdb_id=imdb))
elif _command == 'cal':
ti_.print_trakt_cal_episode(_args)
| {
"content_hash": "6e45314120dfc0317f17b7fc5989e85b",
"timestamp": "",
"source": "github",
"line_count": 504,
"max_line_length": 99,
"avg_line_length": 37.96230158730159,
"alnum_prop": 0.5356713531594627,
"repo_name": "ddboline/movie_collection_app",
"id": "3f12b2d51c6fe6c57d39348cc5e9cad6e63c4180",
"size": "19175",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "movie_collection_app/trakt_instance.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "118282"
},
{
"name": "Shell",
"bytes": "1720"
}
],
"symlink_target": ""
} |
"""Map of test units to improvement direction."""
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
from google.appengine.ext import ndb
from dashboard.models import anomaly
class UnitsToDirection(ndb.Model):
"""Data about improvement direction for one type of unit."""
bigger_is_better = ndb.BooleanProperty(indexed=False)
def GetImprovementDirection(units):
"""Returns the improvement direction for the given units string."""
if not units:
return anomaly.UNKNOWN
entity = ndb.Key(UnitsToDirection, units).get()
if not entity:
return anomaly.UNKNOWN
if entity.bigger_is_better:
return anomaly.UP
return anomaly.DOWN
def UpdateFromJson(units_dict):
"""Updates internal maps of units to direction from the given dictionary.
Args:
units_dict: A dictionary mapping unit names to dictionaries mapping
the string 'improvement_direction' to either 'up' or 'down'.
"""
existing_units = []
# Update or remove existing UnitsToDirection entities.
for units_to_direction_entity in UnitsToDirection.query():
unit = units_to_direction_entity.key.id()
if unit not in units_dict:
# Units not in the input dictionary will be removed from the datastore.
units_to_direction_entity.key.delete()
continue
existing_units.append(unit)
# Update the improvement direction if necessary.
improvement_direction = units_dict[unit]['improvement_direction']
bigger_is_better = (improvement_direction == 'up')
if units_to_direction_entity.bigger_is_better != bigger_is_better:
units_to_direction_entity.bigger_is_better = bigger_is_better
units_to_direction_entity.put()
# Add new UnitsToDirection entities.
for unit, value in units_dict.items():
if not isinstance(value, dict):
continue
if unit not in existing_units:
bigger_is_better = (value['improvement_direction'] == 'up')
UnitsToDirection(id=unit, bigger_is_better=bigger_is_better).put()
| {
"content_hash": "ff08ffdb58107e1b3640cbc67a390e02",
"timestamp": "",
"source": "github",
"line_count": 58,
"max_line_length": 77,
"avg_line_length": 34.91379310344828,
"alnum_prop": 0.7239506172839506,
"repo_name": "endlessm/chromium-browser",
"id": "ac1c7e5def27876212ec64e4e29737815e3fedcd",
"size": "2188",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "third_party/catapult/dashboard/dashboard/units_to_direction.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [],
"symlink_target": ""
} |
import os, os.path, unittest
from django.contrib.gis.gdal import DataSource, Envelope, OGRException, OGRIndexError
from django.contrib.gis.gdal.field import OFTReal, OFTInteger, OFTString
from django.contrib import gis
# Path for SHP files
data_path = os.path.join(os.path.dirname(gis.__file__), 'tests' + os.sep + 'data')
def get_ds_file(name, ext):
return os.sep.join([data_path, name, name + '.%s' % ext])
# Test SHP data source object
class TestDS:
def __init__(self, name, **kwargs):
ext = kwargs.pop('ext', 'shp')
self.ds = get_ds_file(name, ext)
for key, value in kwargs.items():
setattr(self, key, value)
# List of acceptable data sources.
ds_list = (TestDS('test_point', nfeat=5, nfld=3, geom='POINT', gtype=1, driver='ESRI Shapefile',
fields={'dbl' : OFTReal, 'int' : OFTInteger, 'str' : OFTString,},
extent=(-1.35011,0.166623,-0.524093,0.824508), # Got extent from QGIS
srs_wkt='GEOGCS["GCS_WGS_1984",DATUM["WGS_1984",SPHEROID["WGS_1984",6378137,298.257223563]],PRIMEM["Greenwich",0],UNIT["Degree",0.017453292519943295]]',
field_values={'dbl' : [float(i) for i in range(1, 6)], 'int' : range(1, 6), 'str' : [str(i) for i in range(1, 6)]},
fids=range(5)),
TestDS('test_vrt', ext='vrt', nfeat=3, nfld=3, geom='POINT', gtype=1, driver='VRT',
fields={'POINT_X' : OFTString, 'POINT_Y' : OFTString, 'NUM' : OFTString}, # VRT uses CSV, in which all field types are OFTString.
extent=(1.0, 2.0, 100.0, 523.5), # Min/Max from CSV
field_values={'POINT_X' : ['1.0', '5.0', '100.0'], 'POINT_Y' : ['2.0', '23.0', '523.5'], 'NUM' : ['5', '17', '23']},
fids=range(1,4)),
TestDS('test_poly', nfeat=3, nfld=3, geom='POLYGON', gtype=3,
driver='ESRI Shapefile',
fields={'float' : OFTReal, 'int' : OFTInteger, 'str' : OFTString,},
extent=(-1.01513,-0.558245,0.161876,0.839637), # Got extent from QGIS
srs_wkt='GEOGCS["GCS_WGS_1984",DATUM["WGS_1984",SPHEROID["WGS_1984",6378137,298.257223563]],PRIMEM["Greenwich",0],UNIT["Degree",0.017453292519943295]]'),
)
bad_ds = (TestDS('foo'),
)
class DataSourceTest(unittest.TestCase):
def test01_valid_shp(self):
"Testing valid SHP Data Source files."
for source in ds_list:
# Loading up the data source
ds = DataSource(source.ds)
# Making sure the layer count is what's expected (only 1 layer in a SHP file)
self.assertEqual(1, len(ds))
# Making sure GetName works
self.assertEqual(source.ds, ds.name)
# Making sure the driver name matches up
self.assertEqual(source.driver, str(ds.driver))
# Making sure indexing works
try:
ds[len(ds)]
except OGRIndexError:
pass
else:
self.fail('Expected an IndexError!')
def test02_invalid_shp(self):
"Testing invalid SHP files for the Data Source."
for source in bad_ds:
self.assertRaises(OGRException, DataSource, source.ds)
def test03a_layers(self):
"Testing Data Source Layers."
print "\nBEGIN - expecting out of range feature id error; safe to ignore.\n"
for source in ds_list:
ds = DataSource(source.ds)
# Incrementing through each layer, this tests DataSource.__iter__
for layer in ds:
# Making sure we get the number of features we expect
self.assertEqual(len(layer), source.nfeat)
# Making sure we get the number of fields we expect
self.assertEqual(source.nfld, layer.num_fields)
self.assertEqual(source.nfld, len(layer.fields))
# Testing the layer's extent (an Envelope), and its properties
self.assertEqual(True, isinstance(layer.extent, Envelope))
self.assertAlmostEqual(source.extent[0], layer.extent.min_x, 5)
self.assertAlmostEqual(source.extent[1], layer.extent.min_y, 5)
self.assertAlmostEqual(source.extent[2], layer.extent.max_x, 5)
self.assertAlmostEqual(source.extent[3], layer.extent.max_y, 5)
# Now checking the field names.
flds = layer.fields
for f in flds: self.assertEqual(True, f in source.fields)
# Negative FIDs are not allowed.
self.assertRaises(OGRIndexError, layer.__getitem__, -1)
self.assertRaises(OGRIndexError, layer.__getitem__, 50000)
if hasattr(source, 'field_values'):
fld_names = source.field_values.keys()
# Testing `Layer.get_fields` (which uses Layer.__iter__)
for fld_name in fld_names:
self.assertEqual(source.field_values[fld_name], layer.get_fields(fld_name))
# Testing `Layer.__getitem__`.
for i, fid in enumerate(source.fids):
feat = layer[fid]
self.assertEqual(fid, feat.fid)
# Maybe this should be in the test below, but we might as well test
# the feature values here while in this loop.
for fld_name in fld_names:
self.assertEqual(source.field_values[fld_name][i], feat.get(fld_name))
print "\nEND - expecting out of range feature id error; safe to ignore."
def test03b_layer_slice(self):
"Test indexing and slicing on Layers."
# Using the first data-source because the same slice
# can be used for both the layer and the control values.
source = ds_list[0]
ds = DataSource(source.ds)
sl = slice(1, 3)
feats = ds[0][sl]
for fld_name in ds[0].fields:
test_vals = [feat.get(fld_name) for feat in feats]
control_vals = source.field_values[fld_name][sl]
self.assertEqual(control_vals, test_vals)
def test03c_layer_references(self):
"Test to make sure Layer access is still available without the DataSource."
source = ds_list[0]
# See ticket #9448.
def get_layer():
# This DataSource object is not accessible outside this
# scope. However, a reference should still be kept alive
# on the `Layer` returned.
ds = DataSource(source.ds)
return ds[0]
# Making sure we can call OGR routines on the Layer returned.
lyr = get_layer()
self.assertEqual(source.nfeat, len(lyr))
self.assertEqual(source.gtype, lyr.geom_type.num)
def test04_features(self):
"Testing Data Source Features."
for source in ds_list:
ds = DataSource(source.ds)
# Incrementing through each layer
for layer in ds:
# Incrementing through each feature in the layer
for feat in layer:
                    # Making sure the number of fields and the geometry type
                    # are what's expected.
self.assertEqual(source.nfld, len(list(feat)))
self.assertEqual(source.gtype, feat.geom_type)
# Making sure the fields match to an appropriate OFT type.
for k, v in source.fields.items():
# Making sure we get the proper OGR Field instance, using
# a string value index for the feature.
self.assertEqual(True, isinstance(feat[k], v))
# Testing Feature.__iter__
for fld in feat: self.assertEqual(True, fld.name in source.fields.keys())
def test05_geometries(self):
"Testing Geometries from Data Source Features."
for source in ds_list:
ds = DataSource(source.ds)
# Incrementing through each layer and feature.
for layer in ds:
for feat in layer:
g = feat.geom
# Making sure we get the right Geometry name & type
self.assertEqual(source.geom, g.geom_name)
self.assertEqual(source.gtype, g.geom_type)
# Making sure the SpatialReference is as expected.
if hasattr(source, 'srs_wkt'):
self.assertEqual(source.srs_wkt, g.srs.wkt)
def suite():
s = unittest.TestSuite()
s.addTest(unittest.makeSuite(DataSourceTest))
return s
def run(verbosity=2):
unittest.TextTestRunner(verbosity=verbosity).run(suite())
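# A minimal sketch of invoking this suite by hand (assumes a configured Django
# settings module, as required for any GeoDjango test run):
#
#   from django.contrib.gis.gdal.tests import test_ds
#   test_ds.run(verbosity=2)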
| {
"content_hash": "79a999d0085cc7f5f8e196aeee410bfa",
"timestamp": "",
"source": "github",
"line_count": 201,
"max_line_length": 171,
"avg_line_length": 44.63681592039801,
"alnum_prop": 0.5600757913508694,
"repo_name": "chirpradio/chirpradio-volunteers",
"id": "30ce4624758b5898c8ed5cf993a093c1b574b5c7",
"size": "8972",
"binary": false,
"copies": "7",
"ref": "refs/heads/master",
"path": "site-packages/django/contrib/gis/gdal/tests/test_ds.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "JavaScript",
"bytes": "423260"
},
{
"name": "Python",
"bytes": "4902370"
},
{
"name": "Shell",
"bytes": "635"
}
],
"symlink_target": ""
} |
"""
line.client
~~~~~~~~~~~
LineClient for sending and receiving message from LINE server.
:copyright: (c) 2014 by Taehoon Kim.
:license: BSD, see LICENSE for more details.
"""
import re
import requests
import sys
from api import LineAPI
from models import LineGroup, LineContact, LineRoom, LineMessage
from curve.ttypes import TalkException, ToType, OperationType, Provider
reload(sys)
sys.setdefaultencoding("utf-8")
EMAIL_REGEX = re.compile(r"[^@]+@[^@]+\.[^@]+")
class LineClient(LineAPI):
profile = None
contacts = []
rooms = []
groups = []
def __init__(self, id=None, password=None, authToken=None, is_mac=True, com_name="carpedm20"):
"""Provide a way to communicate with LINE server.
:param id: `NAVER id` or `LINE email`
:param password: LINE account password
:param authToken: LINE session key
:param is_mac: (optional) os setting
:param com_name: (optional) name of your system
>>> client = LineClient("carpedm20", "xxxxxxxxxx")
Enter PinCode '9779' to your mobile phone in 2 minutes
>>> client = LineClient("[email protected]", "xxxxxxxxxx")
Enter PinCode '7390' to your mobile phone in 2 minutes
>>> client = LineClient(authToken="xxx ... xxx")
True
"""
if not (authToken or id and password):
msg = "id and password or authToken is needed"
self.raise_error(msg)
if is_mac:
os_version = "10.9.4-MAVERICKS-x64"
user_agent = "DESKTOP:MAC:%s(%s)" % (os_version, self.version)
app = "DESKTOPMAC\t%s\tMAC\t%s" % (self.version, os_version)
else:
os_version = "5.1.2600-XP-x64"
user_agent = "DESKTOP:WIN:%s(%s)" % (os_version, self.version)
app = "DESKTOPWIN\t%s\tWINDOWS\t%s" % (self.version, os_version)
if com_name:
self.com_name = com_name
self._headers['User-Agent'] = user_agent
self._headers['X-Line-Application'] = app
if authToken:
self.authToken = self._headers['X-Line-Access'] = authToken
# self.tokenLogin()
self.ready()
else:
if EMAIL_REGEX.match(id):
self.provider = Provider.LINE # LINE
else:
self.provider = Provider.NAVER_KR # NAVER
self.id = id
self.password = password
self.is_mac = is_mac
self.login()
self.ready()
self.revision = self._getLastOpRevision()
self.getProfile()
try:
self.refreshGroups()
except: pass
try:
self.refreshContacts()
except: pass
try:
self.refreshActiveRooms()
except: pass
def getProfile(self):
"""Get `profile` of LINE account"""
if self._check_auth():
self.profile = LineContact(self, self._getProfile())
return self.profile
return None
def getContactByName(self, name):
"""Get a `contact` by name
:param name: name of a `contact`
"""
for contact in self.contacts:
if name == contact.name:
return contact
return None
def getContactById(self, id):
"""Get a `contact` by id
:param id: id of a `contact`
"""
for contact in self.contacts:
if contact.id == id:
return contact
if self.profile:
if self.profile.id == id:
return self.profile
return None
def getContactOrRoomOrGroupById(self, id):
"""Get a `contact` or `room` or `group` by its id
        :param id: id of an instance
"""
return self.getContactById(id)\
or self.getRoomById(id)\
or self.getGroupById(id)
def refreshGroups(self):
"""Refresh groups of LineClient"""
if self._check_auth():
self.groups = []
self.addGroupsWithIds(self._getGroupIdsJoined())
self.addGroupsWithIds(self._getGroupIdsInvited(), False)
def addGroupsWithIds(self, group_ids, is_joined=True):
"""Refresh groups of LineClient"""
if self._check_auth():
new_groups = self._getGroups(group_ids)
for group in new_groups:
self.groups.append(LineGroup(self, group, is_joined))
self.groups.sort()
def refreshContacts(self):
"""Refresh contacts of LineClient """
if self._check_auth():
contact_ids = self._getAllContactIds()
contacts = self._getContacts(contact_ids)
self.contacts = []
for contact in contacts:
self.contacts.append(LineContact(self, contact))
self.contacts.sort()
def findAndAddContactByUserid(self, userid):
"""Find and add a `contact` by userid
:param userid: user id
"""
if self._check_auth():
try:
contact = self._findAndAddContactsByUserid(userid)
except TalkException as e:
self.raise_error(e.reason)
contact = contact.values()[0]
for c in self.contacts:
if c.id == contact.mid:
self.raise_error("%s already exists" % contact.displayName)
return
            c = LineContact(self, contact)
self.contacts.append(c)
self.contacts.sort()
return c
def _findAndAddContactByPhone(self, phone):
"""Find and add a `contact` by phone number
:param phone: phone number (unknown format)
"""
if self._check_auth():
try:
contact = self._findAndAddContactsByPhone(phone)
except TalkException as e:
self.raise_error(e.reason)
contact = contact.values()[0]
for c in self.contacts:
if c.id == contact.mid:
self.raise_error("%s already exists" % contact.displayName)
return
            c = LineContact(self, contact)
self.contacts.append(c)
self.contacts.sort()
return c
def _findAndAddContactByEmail(self, email):
"""Find and add a `contact` by email
:param email: email
"""
if self._check_auth():
try:
contact = self._findAndAddContactsByEmail(email)
except TalkException as e:
self.raise_error(e.reason)
contact = contact.values()[0]
for c in self.contacts:
if c.id == contact.mid:
self.raise_error("%s already exists" % contact.displayName)
return
            c = LineContact(self, contact)
self.contacts.append(c)
self.contacts.sort()
return c
    def findContactByUserid(self, userid):
"""Find a `contact` by userid
:param userid: user id
"""
if self._check_auth():
try:
contact = self._findContactByUserid(userid)
except TalkException as e:
self.raise_error(e.reason)
return LineContact(self, contact)
def refreshActiveRooms(self):
"""Refresh active chat rooms"""
if self._check_auth():
start = 1
count = 50
self.rooms = []
while True:
channel = self._getMessageBoxCompactWrapUpList(start, count)
for box in channel.messageBoxWrapUpList:
if box.messageBox.midType == ToType.ROOM:
room = LineRoom(self, self._getRoom(box.messageBox.id))
self.rooms.append(room)
if len(channel.messageBoxWrapUpList) == count:
start += count
else:
break
def createGroupWithIds(self, name, ids=[]):
"""Create a group with contact ids
:param name: name of group
:param ids: list of contact ids
"""
if self._check_auth():
try:
group = LineGroup(self, self._createGroup(name, ids))
self.groups.append(group)
return group
except Exception as e:
self.raise_error(e)
return None
def createGroupWithContacts(self, name, contacts=[]):
"""Create a group with contacts
:param name: name of group
:param contacts: list of contacts
"""
if self._check_auth():
try:
contact_ids = []
for contact in contacts:
contact_ids.append(contact.id)
group = LineGroup(self, self._createGroup(name, contact_ids))
self.groups.append(group)
return group
except Exception as e:
self.raise_error(e)
return None
def getGroupByName(self, name):
"""Get a group by name
:param name: name of a group
"""
for group in self.groups:
if name == group.name:
return group
return None
def getGroupById(self, id):
"""Get a group by id
:param id: id of a group
"""
for group in self.groups:
if group.id == id:
return group
return None
def inviteIntoGroup(self, group, contacts=[]):
"""Invite contacts into group
:param group: LineGroup instance
:param contacts: LineContact instances to invite
"""
if self._check_auth():
contact_ids = [contact.id for contact in contacts]
self._inviteIntoGroup(group.id, contact_ids)
def acceptGroupInvitation(self, group):
"""Accept a group invitation
:param group: LineGroup instance
"""
if self._check_auth():
try:
self._acceptGroupInvitation(group.id)
return True
except Exception as e:
self.raise_error(e)
return False
def leaveGroup(self, group):
"""Leave a group
:param group: LineGroup instance to leave
"""
if self._check_auth():
try:
self._leaveGroup(group.id)
self.groups.remove(group)
return True
except Exception as e:
self.raise_error(e)
return False
def createRoomWithIds(self, ids=[]):
"""Create a chat room with contact ids"""
if self._check_auth():
try:
room = LineRoom(self, self._createRoom(ids))
self.rooms.append(room)
return room
except Exception as e:
self.raise_error(e)
return None
def createRoomWithContacts(self, contacts=[]):
"""Create a chat room with contacts"""
if self._check_auth():
try:
contact_ids = []
for contact in contacts:
contact_ids.append(contact.id)
room = LineRoom(self, self._createRoom(contact_ids))
self.rooms.append(room)
return room
except Exception as e:
self.raise_error(e)
return None
def getRoomById(self, id):
"""Get a room by id
:param id: id of a room
"""
for room in self.rooms:
if room.id == id:
return room
return None
def inviteIntoRoom(self, room, contacts=[]):
"""Invite contacts into room
:param room: LineRoom instance
:param contacts: LineContact instances to invite
"""
if self._check_auth():
contact_ids = [contact.id for contact in contacts]
self._inviteIntoRoom(room.id, contact_ids)
def leaveRoom(self, room):
"""Leave a room
:param room: LineRoom instance to leave
"""
if self._check_auth():
try:
self._leaveRoom(room.id)
self.rooms.remove(room)
return True
except Exception as e:
self.raise_error(e)
return False
def sendMessage(self, message, seq=0):
"""Send a message
:param message: LineMessage instance to send
"""
if self._check_auth():
try:
return self._sendMessage(message, seq)
except TalkException as e:
self.updateAuthToken()
try:
return self._sendMessage(message, seq)
except Exception as e:
self.raise_error(e)
return False
def getMessageBox(self, id):
"""Get MessageBox by id
:param id: `contact` id or `group` id or `room` id
"""
if self._check_auth():
try:
messageBoxWrapUp = self._getMessageBoxCompactWrapUp(id)
return messageBoxWrapUp.messageBox
except:
return None
def getRecentMessages(self, messageBox, count):
"""Get recent message from MessageBox
:param messageBox: MessageBox object
"""
if self._check_auth():
id = messageBox.id
messages = self._getRecentMessages(id, count)
return self.getLineMessageFromMessage(messages)
def longPoll(self, count=50, debug=False):
"""Receive a list of operations that have to be processed by original
Line cleint.
:param count: number of operations to get from
:returns: a generator which returns operations
>>> for op in client.longPoll():
sender = op[0]
receiver = op[1]
message = op[2]
print "%s->%s : %s" % (sender, receiver, message)
"""
if self._check_auth():
"""Check is there any operations from LINE server"""
OT = OperationType
try:
operations = self._fetchOperations(self.revision, count)
except EOFError:
return
except TalkException as e:
if e.code == 9:
self.raise_error("user logged in to another machine")
else:
return
for operation in operations:
if debug:
print operation
if operation.type == OT.END_OF_OPERATION:
pass
elif operation.type == OT.SEND_MESSAGE:
pass
elif operation.type == OT.RECEIVE_MESSAGE:
message = LineMessage(self, operation.message)
raw_sender = operation.message._from
raw_receiver = operation.message.to
sender = self.getContactOrRoomOrGroupById(raw_sender)
receiver = self.getContactOrRoomOrGroupById(raw_receiver)
                    # If the sender is not found, check the member list of the group the message was sent to
if sender is None and type(receiver) is LineGroup:
for m in receiver.members:
if m.id == raw_sender:
sender = m
break
if sender is None or receiver is None:
self.refreshGroups()
self.refreshContacts()
self.refreshActiveRooms()
sender = self.getContactOrRoomOrGroupById(raw_sender)
receiver = self.getContactOrRoomOrGroupById(raw_receiver)
if sender is None or receiver is None:
contacts = self._getContacts([raw_sender, raw_receiver])
if contacts:
if len(contacts) == 2:
sender = LineContact(self, contacts[0])
receiver = LineContact(self, contacts[1])
yield (sender, receiver, message)
else:
print "[*] %s" % OT._VALUES_TO_NAMES[operation.type]
print operation
self.revision = max(operation.revision, self.revision)
def createContactOrRoomOrGroupByMessage(self, message):
if message.toType == ToType.USER:
pass
elif message.toType == ToType.ROOM:
pass
elif message.toType == ToType.GROUP:
pass
def getLineMessageFromMessage(self, messages=[]):
"""Change Message objects to LineMessage objects
        :param messages: list of Message objects
"""
lineMessages = []
for message in messages:
lineMessages.append(LineMessage(self, message))
return lineMessages
def _check_auth(self):
"""Check if client is logged in or not"""
if self.authToken:
return True
else:
msg = "you need to login"
self.raise_error(msg)
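# A minimal usage sketch. The token below is a placeholder and a reachable LINE
# account/network is assumed, so treat this as illustration only:
#
#   client = LineClient(authToken="xxx ... xxx")
#   friend = client.getContactByName("carpedm20")
#   for sender, receiver, message in client.longPoll():
#       print "%s -> %s : %s" % (sender, receiver, message)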
| {
"content_hash": "6217ffa0bd6932fe9d333db26fd6e4da",
"timestamp": "",
"source": "github",
"line_count": 583,
"max_line_length": 98,
"avg_line_length": 30.1663807890223,
"alnum_prop": 0.5182236879513277,
"repo_name": "hitobias/line",
"id": "a965037d5d9f83459b5755b4dd60fe733c0cbdb9",
"size": "17611",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "line/client.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Makefile",
"bytes": "101"
},
{
"name": "Python",
"bytes": "41100"
}
],
"symlink_target": ""
} |
import os
PROJECT_DIR = os.path.split(os.path.abspath(__file__))[0]
STATIC_DIR = os.path.join(PROJECT_DIR, "static")
TEMPLATE_DIR = os.path.join(PROJECT_DIR, "templates")
OUT_DIR = os.path.join(PROJECT_DIR, "out")
DEBUG = True
TEMPLATE_DEBUG = DEBUG
ADMINS = (
# ('Your Name', '[email protected]'),
)
MANAGERS = ADMINS
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(OUT_DIR, 'demo.db')
}
}
# Hosts/domain names that are valid for this site; required if DEBUG is False
# See https://docs.djangoproject.com/en/1.5/ref/settings/#allowed-hosts
ALLOWED_HOSTS = []
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# In a Windows environment this must be set to your system time zone.
TIME_ZONE = 'UTC'
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en-us'
SITE_ID = 1
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
# If you set this to False, Django will not format dates, numbers and
# calendars according to the current locale.
USE_L10N = True
# If you set this to False, Django will not use timezone-aware datetimes.
USE_TZ = True
# Absolute filesystem path to the directory that will hold user-uploaded files.
# Example: "/var/www/example.com/media/"
MEDIA_ROOT = ''
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash.
# Examples: "http://example.com/media/", "http://media.example.com/"
MEDIA_URL = ''
# URL prefix for static files.
# Example: "http://example.com/static/", "http://static.example.com/"
STATIC_URL = '/static/'
# Additional locations of static files
STATICFILES_DIRS = (
# Put strings here, like "/home/html/static" or "C:/www/django/static".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
STATIC_DIR,
)
# Make this unique, and don't share it with anybody.
SECRET_KEY = 'Writeyoursecretaccesskey'
# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
# 'django.template.loaders.eggs.Loader',
)
MIDDLEWARE_CLASSES = (
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
# Uncomment the next line for simple clickjacking protection:
# 'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
# Python dotted path to the WSGI application used by Django's runserver.
WSGI_APPLICATION = 'test_app.wsgi.application'
TEMPLATE_DIRS = (
# Put strings here, like "/home/html/django_templates"
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
TEMPLATE_DIR,
)
INSTALLED_APPS = (
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.messages',
'django.contrib.staticfiles',
'django.contrib.admin',
'test_app',
'sqlreports',
'south'
# Uncomment the next line to enable the admin:
# 'django.contrib.admin',
# Uncomment the next line to enable admin documentation:
# 'django.contrib.admindocs',
)
ROOT_URLCONF = 'test_app.urls'
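# Rough sketch of exercising the test app against these settings (the exact
# management commands depend on the Django/South versions this project targets):
#
#   python manage.py syncdb --settings=test_app.settings
#   python manage.py migrate --settings=test_app.settings
#   python manage.py runserver --settings=test_app.settings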
| {
"content_hash": "fee6fc3b6b512d258529da429305a304",
"timestamp": "",
"source": "github",
"line_count": 119,
"max_line_length": 79,
"avg_line_length": 31.554621848739497,
"alnum_prop": 0.7169107856191744,
"repo_name": "hashedin/django-sql-reports",
"id": "127037f264f745666f3967061b5cf09a93ece3e8",
"size": "3755",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test_app/settings.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "15743"
}
],
"symlink_target": ""
} |
import json
import imghdr
import posixpath
from django.shortcuts import render, get_object_or_404
from django.contrib import messages
from django.utils.six import text_type
from django.http import HttpResponse
from sendfile import sendfile
from waliki.models import Page
from waliki.acl import permission_required
from .models import Attachment
@permission_required('change_page')
def attachments(request, slug):
last_attached = None
page = get_object_or_404(Page, slug=slug)
if request.method == 'POST' and 'attach' in request.FILES:
last_attached = request.FILES['attach']
Attachment.objects.create(file=last_attached, page=page)
messages.success(request, '"%s" was attached succesfully to /%s' % (last_attached.name, page.slug))
return render(request, 'waliki/attachments.html', {'page': page})
@permission_required('delete_page')
def delete_attachment(request, slug, attachment_id_or_filename):
if attachment_id_or_filename.isnumeric():
attachment = get_object_or_404(Attachment, id=attachment_id_or_filename, page__slug=slug)
else:
attachment = get_object_or_404(Attachment, file__endswith='%s%s' % (posixpath.sep, attachment_id_or_filename), page__slug=slug)
name = text_type(attachment)
if request.is_ajax() and request.method in ('POST', 'DELETE'):
attachment.delete()
return HttpResponse(json.dumps({'removed': name}), content_type="application/json")
return HttpResponse(json.dumps({'removed': None}), content_type="application/json")
@permission_required('view_page', raise_exception=True)
def get_file(request, slug, attachment_id=None, filename=None):
attachment = get_object_or_404(Attachment, file__endswith='%s%s' % (posixpath.sep, filename), page__slug=slug)
as_attachment = ((not imghdr.what(attachment.file.path) and 'embed' not in request.GET)
or 'as_attachment' in request.GET)
# ref https://github.com/johnsensible/django-sendfile
return sendfile(request, attachment.file.path,
attachment=as_attachment, attachment_filename=text_type(attachment))
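# A rough sketch of how these views could be routed; waliki's real URL names and
# patterns may differ, so the regexes below are only illustrative:
#
#   from django.conf.urls import url
#   from waliki.attachments import views
#
#   urlpatterns = [
#       url(r'^(?P<slug>[^/]+)/attachments/$', views.attachments),
#       url(r'^(?P<slug>[^/]+)/attachments/(?P<attachment_id_or_filename>[^/]+)/delete/$',
#           views.delete_attachment),
#       url(r'^(?P<slug>[^/]+)/attachments/(?P<filename>[^/]+)$', views.get_file),
#   ]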
| {
"content_hash": "e458b55a5e3638e2ab45b79f8c8f6df4",
"timestamp": "",
"source": "github",
"line_count": 45,
"max_line_length": 135,
"avg_line_length": 47.22222222222222,
"alnum_prop": 0.7176470588235294,
"repo_name": "OlegGirko/waliki",
"id": "a4047d1e00740d9da04fb071bf0fe88e53824fbc",
"size": "2149",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "waliki/attachments/views.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "29217"
},
{
"name": "HTML",
"bytes": "56134"
},
{
"name": "JavaScript",
"bytes": "49407"
},
{
"name": "Makefile",
"bytes": "1261"
},
{
"name": "Python",
"bytes": "189734"
},
{
"name": "XSLT",
"bytes": "3542"
}
],
"symlink_target": ""
} |
"""New citree-based codeintel evaluation engine.
A 'citree' is basically an ElementTree of a CIX document with some tweaks.
The idea is to use these for completion/calltip evaluation instead of the
CIDB. This is mainly about performance but also about fixing some
limitations, bugs, and having a better code design (i.e. where lang-specific
quirks can be dealt with cleanly).
"""
import sys
from os.path import normpath
import logging
import re
import ciElementTree as ET
if not getattr(ET, "_patched_for_komodo_", False):
import warnings
warnings.warn("Not using codeintel's patched elementtree: "
"this may cause problems")
from codeintel2.common import *
from codeintel2.citadel import CitadelEvaluator
log = logging.getLogger("codeintel.tree")
CIX_VERSION = "2.0"
def tree_2_0_from_tree_0_1(tree):
"""Convert CIX 0.1 to CIX 2.0."""
# - update some of the no longer used <file> attributes
# - drop "generator"
try:
del tree[0].attrib["generator"]
except KeyError:
pass
# - drop 'md5' and 'mtime' on the <file> tag
try:
del tree[0].attrib["md5"]
except KeyError:
pass
try:
del tree[0].attrib["mtime"]
except KeyError:
pass
# - move "language" attribute on <file> to "lang" and to "lang" on
# <module> (New multi-lang CIX output will allow one file to
# have modules of different langs.)
for file in tree.getiterator("file"):
lang = file.get("language")
if lang is not None:
file.set("lang", lang)
for module in file.getiterator("module"):
if module.get("lang") is None:
module.set("lang", lang)
try:
del file.attrib["language"]
except KeyError:
# Be tolerant of transitional CIX.
pass
# - move <doc> and <signature> optional sub tags into parent
# attribute
# PERF: This could be done better.
for tag in ("variable", "function", "class", "module", "interface",
"argument", "classref", "interfaceref"):
for node in tree.getiterator(tag):
for child in reversed(node):
                # reversed() so we can modify the node while iterating over it
if child.tag == "signature":
if child.text: # be tolerant of <signature />
node.set("signature", child.text)
node.remove(child)
elif child.tag == "doc":
if child.text: # be tolerant of <doc />
node.set("doc", child.text)
node.remove(child)
if not node: # no children now
node.text = None
# - move non-variable tags to attributes
# (XXX currently <classref> and <interfaceref> tags are not moved)
for tag in ("variable", "argument", "classref", "interfaceref"):
for node in tree.getiterator(tag):
for child in reversed(node):
if child.tag == "type":
node.set("citdl", child.get("type"))
node.remove(child)
if not node: # no remaining children
node.text = None
if tag == "argument":
node.tag = "variable"
node.set("ilk", "argument")
# - move <returns> to a <function> attribute
for node in tree.getiterator("function"):
for child in reversed(node): # PERF: could just check last child
if child.tag == "returns":
assert child[0].tag == "type"
node.set("returns", child[0].get("type"))
node.remove(child)
# - move classrefs and interfacerefs to attributes
# Note: <classref attribute="__mixin__"> => "mixinrefs" attribute.
# This is used by Ruby (though not used for eval, yet).
for scope_ilk in ("class", "interface"):
for node in tree.getiterator(scope_ilk):
interfacerefs = []
classrefs = []
mixinrefs = []
for child in reversed(node):
if child.tag == "classref":
if "__mixin__" in child.get("attributes", ""):
mixinrefs.append(child.get("citdl")
or child.attrib["name"])
else:
classrefs.append(child.get("citdl")
or child.attrib["name"])
node.remove(child)
elif child.tag == "interfaceref":
interfacerefs.append(child.get("citdl")
or child.attrib["name"])
node.remove(child)
if classrefs:
classrefs.reverse()
assert not [c for c in classrefs if ' ' in c]
node.set("classrefs", ' '.join(classrefs))
if interfacerefs:
interfacerefs.reverse()
assert not [i for i in interfacerefs if ' ' in i]
node.set("interfacerefs", ' '.join(interfacerefs))
if mixinrefs:
mixinrefs.reverse()
assert not [m for m in mixinrefs if ' ' in m]
node.set("mixinrefs", ' '.join(mixinrefs))
if len(node) == 0:
node.text = None
# - make all scope tags a "scope" tag (easier for elem.find() usage)
for tag in ("class", "function", "interface", "module"):
for node in tree.getiterator(tag):
node.tag = "scope"
if tag == "class" and "__namespace__" in node.get("attributes", ""):
node.set("ilk", "namespace")
attributes = node.get("attributes").split()
attributes.remove("__namespace__")
if not attributes:
del node.attrib["attributes"]
else:
node.set("attributes", ' '.join(attributes))
elif tag == "module":
node.set("ilk", "blob")
else:
node.set("ilk", tag)
tree.set("version", "2.0")
return tree
def tree_from_cix_path(cix_path):
"""Return a (ci)tree for the CIX content in the given path.
Raises pyexpat.ExpatError if the CIX content could not be parsed.
"""
tree = ET.parse(cix_path).getroot()
version = tree.get("version")
if version == CIX_VERSION:
return tree
elif version == "0.1":
return tree_2_0_from_tree_0_1(tree)
else:
raise CodeIntelError("unknown CIX version: %r" % version)
def tree_from_cix(cix):
"""Return a (ci)tree for the given CIX content.
Raises pyexpat.ExpatError if the CIX content could not be parsed.
"""
if isinstance(cix, str):
cix = cix.encode("UTF-8", "xmlcharrefreplace")
tree = ET.XML(cix)
version = tree.get("version")
if version == CIX_VERSION:
return tree
elif version == "0.1":
return tree_2_0_from_tree_0_1(tree)
else:
raise CodeIntelError("unknown CIX version: %r" % version)
def pretty_tree_from_tree(tree, indent_width=2):
"""Add appropriate .tail and .text values to the given tree so that
it will have a pretty serialization.
Note: This modifies the tree *in-place*.
Presumption: This is a CIX 2.0 tree.
"""
INDENT = ' '*indent_width
def _prettify(elem, indent_level=0):
if elem: # i.e. has children
elem.text = '\n' + INDENT*(indent_level+1)
for child in elem:
_prettify(child, indent_level+1)
elem[-1].tail = '\n' + INDENT*indent_level
elem.tail = '\n' + INDENT*indent_level
else:
elem.text = None
elem.tail = '\n' + INDENT*indent_level
_prettify(tree)
return tree
def check_tree(tree):
"""Generate warnings/errors for common mistakes in CIX trees.
Yields tuples of the form:
("warning|error", <msg>)
"""
assert tree.tag == "codeintel",\
"can only check starting from <codeintel> element"
assert tree.get("version") == CIX_VERSION, \
"can only check CIX v%s trees" % CIX_VERSION
# - file 'lang' is set, not 'language'
file = tree[0]
if not file.get("lang"):
yield ("error", "no 'lang' attr on <file> element")
if file.get("language"):
yield ("warning", "'language' attr on <file> element is obsolete,"
"use 'lang'")
for blob in file:
if blob.get("ilk") != "blob":
yield ("error", "element under <file> is not ilk=blob: %r" % blob)
# - blob 'lang' is set
if not blob.get("lang"):
yield ("error", "no 'lang' attr on <blob> element: %r" % blob)
# - classrefs are space separated, not with commas (warn)
for class_elem in blob.getiterator("scope"):
if class_elem.get("ilk") != "class":
continue
classrefs = class_elem.get("classrefs")
if not classrefs:
continue
if ',' in classrefs:
yield ("warning", "multiple class references in 'classrefs' "
"attr on class scopes must be "
"space-separated: %r may be using "
"comma-separation: %r"
% (class_elem, classrefs))
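# Example of consuming the checker (assumes `tree` was produced by one of the
# loaders above):
#
#   for level, msg in check_tree(tree):
#       log.log(logging.WARNING if level == "warning" else logging.ERROR, msg)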
class TreeEvaluator(CitadelEvaluator):
def get_start_scoperef(self):
linenum = self.line + 1 # convert to 1-based
try:
blob = self.buf.blob_from_lang[self.trg.lang]
except KeyError:
raise EvalError("no %s scan info for %r" % (self.lang, self.buf))
return self.buf.scoperef_from_blob_and_line(blob, linenum)
def eval(self, mgr):
self.mgr = mgr
self.citadel = mgr.citadel
if self.ctlr.is_aborted():
self.ctlr.done("aborting")
return
self.ctlr.info("eval %s %s", self, self.trg)
self.pre_eval()
try:
if self.trg.form == TRG_FORM_CPLN:
cplns = self.eval_cplns()
if cplns:
cplns = self.post_process_cplns(cplns)
self.info(" cplns: %r", cplns)
if cplns:
self.ctlr.set_cplns(cplns)
elif self.trg.form == TRG_FORM_CALLTIP:
calltips = self.eval_calltips()
if calltips:
calltips = self.post_process_calltips(calltips)
self.info(" calltips: %r", calltips)
if calltips:
self.ctlr.set_calltips(calltips)
else: # self.trg.form == TRG_FORM_DEFN
defns = self.eval_defns()
if defns:
defns = Definition.unique_definitions(defns)
defns = self.post_process_defns(defns)
self.info(" defns: %r", defns)
if defns:
self.ctlr.set_defns(defns)
self.ctlr.done("success")
except CodeIntelError as ex:
# XXX Should we have an error handling hook here?
self.ctlr.error("evaluating %s: %s", self, ex)
self.ctlr.done("eval error")
except Exception:
log.exception("Unexpected error with evaluator: %s", self)
# Must still mark done on the ctlr to avoid leaks - bug 65502.
self.ctlr.done("eval error")
def scope_stack_from_tree_and_linenum(self, tree, linenum):
"""Get the start scope for the given line.
"linenum" appears to be 0-based, however all CIX line data
is 1-based so we'll convert here.
Dev Notes:
- XXX Add built-in scope.
"""
linenum += 1 # convert to 1-based
# XXX This is presuming that the tree has only one blob.
scope_stack = [tree.find("file/scope")]
while True:
next_scope_could_be = None
# PERF: Could make this a binary search if a scope has *lots* of
# subscopes.
for scope in scope_stack[-1].findall("scope"):
start = int(scope.get("line"))
if start <= linenum \
and (not scope.get("lineend")
or linenum <= int(scope.get("lineend"))):
next_scope_could_be = scope
elif start > linenum:
break
if next_scope_could_be is not None:
scope_stack.append(next_scope_could_be)
else:
break
return scope_stack
# TODO: split out '()' as a separate token.
def _tokenize_citdl_expr(self, citdl):
for token in citdl.split('.'):
yield token
def _join_citdl_expr(self, tokens):
return '.'.join(tokens)
def str_elem(self, elem):
if elem.tag == "scope":
return "%s %s" % (elem.get("ilk"), elem.get("name"))
else:
return "%s %s" % (elem.tag, elem.get("name"))
def str_elem_and_children(self, elem):
        s = [self.str_elem(elem)]
        for child in elem:
            s.append(self.str_elem(child))
        return "%s: %s" % (s[0], ', '.join(s[1:]))
def str_import(self, elem):
# c.f. cb.py::getDescForImport()
module = elem.get("module")
symbol = elem.get("symbol")
alias = elem.get("alias")
if alias and symbol:
s = "from %(module)s import %(symbol)s as %(alias)s" % locals()
elif alias:
s = "import %(module)s as %(alias)s" % locals()
elif symbol:
s = "from %(module)s import %(symbol)s" % locals()
else:
s = "import %(module)s" % locals()
return s
# logging funcs (perhaps best on controller)
def log_start(self):
self._log = []
def log(self, msg, *args, **kwargs):
"""
kwargs:
"cached" (boolean) indicates if result was from cache
"""
pass
#log_indent = ' '*4
#if True: # just print as we go
# if args:
# s = [msg % args]
# else:
# s = [msg]
# if kwargs.get("cached"):
# s.append(" (cached)")
# self.info('%s', ''.join(s))
#else: # capture log for latter printing
# self._log.append(msg, args, kwargs)
def pre_eval(self):
self.curr_tree = self.buf.tree
# ET.dump(self.curr_tree)
def _eval_citdl_expr(self, expr, scope_stack):
"""Return the citree node for the given CITDL expression.
os.environ.get() -> <class 'str' on stdlib blob 'built-in'>
"""
tokens = list(self._tokenize_citdl_expr(expr))
assert tokens, "have to adjust handling if no tokens"
obj = self._eval_citdl_token(tokens[0], scope_stack)
for i, token in enumerate(tokens[1:]):
if token.endswith("()"):
                token = token[:-2]
call = True
else:
call = False
if obj.tag == "import":
obj = self._eval_import_getattr(obj, token,
self._join_citdl_expr(tokens[:i+2]))
else:
# XXX marky: this code does not appear to be used!
# (nobody seems to define _eval_getattr)
obj = self._eval_getattr(obj, token,
self._join_citdl_expr(tokens[:i+2]))
if call:
raise CodeIntelError("_eval_citdl_expr(%r): not handling "
"call on %r "
% (expr, self.str_elem(obj)))
if obj.tag == "import":
raise CodeIntelError("_eval_citdl_expr: cannot return import "
"<%s>: need to resolve it"
% self.str_import(obj))
return obj
def _resolve_import(self, module_name, symbol_name=None):
"""Return a loaded citree node for the given import info.
"module_name" is the name of the module to import.
"symbol_name" (if given) is the attribute on that module to
return.
"""
# TODO: get logging right
# XXX All the logging stuff should go on the controller and that
# should get passed in here for appropriate logging of this
# eval.
# XXX Will this screw up for, e.g. in Python:
# 'import codeintel.utils'?
import_handler = self.citadel.import_handler_from_lang(self.lang)
module = import_handler.import_blob_name(
module_name, self.buf.libs, self.ctlr)
self.log("module '%s' imports <%s>", module_name,
self.str_elem(module))
if symbol_name:
# XXX logging won't be right here
return self._eval_getattr(module, symbol_name,
"%s.%s" % (module_name, symbol_name))
# XXX Here is the _eval_getattr code to duplicate.
# self.log("lookup '%s' on <%s>:", name, self.str_elem(obj))
# for child in obj:
# if child.get("name") == name:
# attr = child
# self.log("'%s' is <%s>", citdl_expr, self.str_elem(child))
# return attr
# else:
# raise CodeIntelError("couldn't find '%s' attribute on <%s>"
# % (name, self.str_elem(obj)))
else:
return module
def _eval_import(self, imp, name):
"""Return the object imported, if any, with the given import
node (in a citree) and name.
Return value: If successful it returns the citree node imported.
If 'name' was not found in a '*'-import then None is returned
(e.g. it is not exceptional that 'from os import *' does not
import 'fuzzywuzzy'). If the import could not be resolved, but
it looks like it should have been, then an error is raised.
"""
# One of these:
# 'os' may be from <import os>:
# ...
# 'os' is <blob os>
# 'os' is from <import os>: <blob os> (cached)
#
# Python
# if non-* import and matches:
# 'os' is from <import os>
# is <import os> from <project foo>? no
# ...
# is <import os> from <python-2.4-stdlib>? yes: <blob os>
# 'os' is <blob os>
#
# 'dirname' may be from <from os.path import *>:
# is <from os.path import *> from <project foo>? no
# ...
# is <from os.path import *> from <python-2.4-stdlib>? yes: <blob
# os.path>
# TOTEST:
# - 'from xml import dom', does that get it right? I doubt it.
module_name = imp.get("module")
symbol_name = imp.get("symbol")
alias = imp.get("alias")
obj = None
if alias:
if alias == name: # <import foo as name> or <from foo import bar as name>
self.log("'%s' is from <%s>", name, self.str_import(imp))
return self._resolve_import(module_name, symbol_name)
elif symbol_name:
assert symbol_name != "**", "only Perl should be using '**' for imports"
if symbol_name == "*": # <from foo import *>
self.log("'%s' may be from <%s>", name, imp)
# XXX some variation of _resolve_import to specify just
# importing the module.
try:
module = self._resolve_import(module_name)
except CodeIntelError as ex: # use equivalent of NoModuleEntry?
self.warn("could not resolve '%s' import to handle <%s>",
module_name, self.str_import(imp))
return None
# TODO:
# see if name in export list (__all__ for Python,
# @EXPORT for Perl, default all top-level symbols)
# if so, eval getattr of name on module object
self.warn("not handling <%s>!", self.str_import(imp))
if symbol_name == name: # <from foo import name>
self.log("'%s' is from <%s>", name, self.str_import(imp))
return self._resolve_import(module_name, symbol_name)
elif module_name == name: # <import foo>
self.log("'%s' is from <%s>", name, self.str_import(imp))
return self._resolve_import(module_name)
return None
def _eval_citdl_token(self, token, scope_stack):
start_scope_str = self.str_elem(scope_stack[-1])
self.log("eval '%s' at <%s>:", token, start_scope_str)
while scope_stack:
scope = scope_stack.pop()
self.log("is '%s' accessible on <%s>?",
token, self.str_elem(scope))
for child in reversed(scope):
# Check children in reverse because last definition in
# file wins. A correct refinement *for the top-level*
# would be to skip anything defined later in the file
# than the current start position.
# TODO-PERF: The list of symbols on a scope should be a
# dict to speed up this loop. This is complicated
# by '*' imports.
if child.tag == "import":
obj = self._eval_import(child, token)
if obj:
return obj
elif child.get("name") == token:
obj = child
if obj.tag == "variable":
citdl = obj.get("citdl")
if not citdl:
self.log("'%s' is <%s> which is of unknown type",
token, self.str_elem(obj))
raise CodeIntelError(
"don't know type of <%s> on <%s>"
% (self.str_elem(obj), self.str_elem(scope)))
else:
self.log("'%s' is <%s> which is '%s'", token,
self.str_elem(obj), citdl)
obj = self._eval_citdl_expr(
citdl, scope_stack+[scope])
self.log("'%s' is <%s>", token,
self.str_elem(obj))
else:
self.log("'%s' is <%s>", token, self.str_elem(obj))
return obj
else:
continue
else:
raise CodeIntelError("couldn't resolve '%s' starting at %s"
% (token, start_scope_str))
def _defn_from_hit(self, hit):
elem, (blob, lpath) = hit
# self.log("_defn_from_hit:: blob: %r", blob)
# for attr_name, attr_value in blob.attrib.items():
# self.log("attr_name: %r, attr_value: %r", attr_name, attr_value)
# self.log("_defn_from_hit:: elem: %r", elem)
path = blob.get("src", None)
name = elem.get("name", None)
line = elem.get(
"line", 1) # e.g. for an import, just use the first line
if line is not None:
try:
line = int(line)
except ValueError:
line = 1
ilk = elem.get("ilk") or elem.tag
citdl = elem.get("citdl", None)
doc = elem.get("doc", None)
signature = elem.get("signature", None)
attributes = elem.get("attributes", None)
returns = elem.get("returns", None)
try:
scope = self._elem_from_scoperef((blob, lpath))
except AttributeError:
scopestart = 1
scopeend = 0
else:
def safe_int_get(attr, default_value):
try:
return int(scope.get(attr, default_value))
except (ValueError, AttributeError):
return default_value
scopestart = safe_int_get("line", 1)
scopeend = safe_int_get("lineend", 0)
# Only fixup paths that do not look like URIs.
if path and not re.match(r"^\w+:\/\/", path):
if sys.platform == "win32":
path = path.replace('/', '\\') # unnormalize path
path = normpath(path) # remove possible '.' and '..' elements
defn = Definition(blob.get("lang"), path, blob.get("name"), lpath,
name, line, ilk, citdl, doc,
signature, attributes, returns, scopestart, scopeend)
return defn
class _infinite_recursion_checker(object):
def __init__(self, evalr):
self.evalr = evalr
def __enter__(self):
self.evalr._eval_count_all += 1
if self.evalr._eval_count_all >= self.evalr._SENTINEL_MAX_ALL_COUNT:
raise EvalError("Too much recursion")
def __exit__(self, exc_type, exc_value, traceback):
self.evalr._eval_count_all -= 1
# The SENTINEL_MAX_EXPR_COUNT could probably be *reduced*.
# Note: This is an approximation that we are infinitely looping
# on the same evaluation. The *actual* appropriate key would be:
#
# (expr, scoperef)
#
# but that is overkill for now, I think.
_SENTINEL_MAX_EXPR_COUNT = 10
_SENTINEL_MAX_ALL_COUNT = 100
_eval_count_from_expr = None
_eval_count_all = 0
def _check_infinite_recursion(self, expr):
if self._eval_count_from_expr is None:
            # Move this init into eval() when on TreeEvaluator.
self._eval_count_from_expr = {}
eval_count = self._eval_count_from_expr.get(expr, 0)
eval_count += 1
if eval_count >= self._SENTINEL_MAX_EXPR_COUNT:
raise EvalError("hit eval sentinel: expr '%s' eval count "
"is %d (abort)" % (expr, eval_count))
self._eval_count_from_expr[expr] = eval_count
return TreeEvaluator._infinite_recursion_checker(self)
#---- internal support stuff
def _dump_element(elem, indent=''):
"""Dump an element tree without using ET.dump() which
(1) uses XML syntax,
(2) can blow up if an attribute is set to None accidentally.
This is only useful for debugging.
"""
s = "%selement '%s': %s" % (indent, elem.tag, elem.attrib)
#print(s)
for child in elem:
_dump_element(child, indent+' ')
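# Debugging sketch: dump a loaded tree without ET.dump(). Note that the print
# call above is commented out, so re-enable it (or route `s` to a logger) first:
#
#   tree = tree_from_cix_path("/path/to/python.cix")
#   _dump_element(tree)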
| {
"content_hash": "45ac704112913b8f6ce751c1b33c08cf",
"timestamp": "",
"source": "github",
"line_count": 683,
"max_line_length": 87,
"avg_line_length": 39.412884333821374,
"alnum_prop": 0.5135406218655968,
"repo_name": "anisku11/sublimeku",
"id": "30e0fe1af88ce23dc5995001d29a26a36da3d111",
"size": "28625",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Packages/CodeComplice/libs/codeintel2/tree.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "17282"
},
{
"name": "Go",
"bytes": "5456"
},
{
"name": "HTML",
"bytes": "4990"
},
{
"name": "JavaScript",
"bytes": "1714"
},
{
"name": "PHP",
"bytes": "43942"
},
{
"name": "Python",
"bytes": "9058676"
},
{
"name": "Shell",
"bytes": "95"
}
],
"symlink_target": ""
} |
import os
from xobj import xobj
if not os.environ.has_key('DJANGO_SETTINGS_MODULE'):
os.environ['DJANGO_SETTINGS_MODULE'] = 'mint.django_rest.settings'
class LinkElement(object):
_xobj = xobj.XObjMetadata(
attributes = {
'href' : str,
},
)
def __init__(self, uri, value=None):
self.href = "%(uri)s" % vars()
self._xobj.text = value
def __repr__(self):
        return unicode(self._xobj.text)
class IDElement(object):
_xobj = xobj.XObjMetadata(
attributes = {
'id' : str,
},
)
def __init__(self, uri):
self.id = "%(uri)s" % vars()
def __repr__(self):
return unicode(self.id)
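# Illustrative construction only (the URI is hypothetical; the surrounding REST
# models supply the real hrefs when these elements are serialized via xobj):
#
#   link = LinkElement("https://rbuilder.example.com/api/images/1", "image 1")
#   ref = IDElement("https://rbuilder.example.com/api/images/1")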
| {
"content_hash": "a60b33598b01d08c52392a91437ed330",
"timestamp": "",
"source": "github",
"line_count": 33,
"max_line_length": 70,
"avg_line_length": 22.666666666666668,
"alnum_prop": 0.5053475935828877,
"repo_name": "sassoftware/mint",
"id": "5fc6eff10ba9b24423921cfa0ad66be22de5f5cb",
"size": "1335",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "mint/django_rest/rbuilder/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "50165"
},
{
"name": "Genshi",
"bytes": "58741"
},
{
"name": "HTML",
"bytes": "2814"
},
{
"name": "JavaScript",
"bytes": "11470"
},
{
"name": "Makefile",
"bytes": "92418"
},
{
"name": "NASL",
"bytes": "582"
},
{
"name": "PLpgSQL",
"bytes": "5358"
},
{
"name": "Puppet",
"bytes": "17914"
},
{
"name": "Python",
"bytes": "3239135"
},
{
"name": "Ruby",
"bytes": "9268"
},
{
"name": "Shell",
"bytes": "24834"
}
],
"symlink_target": ""
} |
from __future__ import print_function
import glob
import sys
# drivers
from PIL import BdfFontFile
from PIL import PcfFontFile
VERSION = "0.4"
if len(sys.argv) <= 1:
print("PILFONT", VERSION, "-- PIL font compiler.")
print()
print("Usage: pilfont fontfiles...")
print()
print("Convert given font files to the PIL raster font format.")
print("This version of pilfont supports X BDF and PCF fonts.")
sys.exit(1)
files = []
for f in sys.argv[1:]:
files = files + glob.glob(f)
for f in files:
print(f + "...", end=' ')
try:
fp = open(f, "rb")
try:
p = PcfFontFile.PcfFontFile(fp)
except SyntaxError:
fp.seek(0)
p = BdfFontFile.BdfFontFile(fp)
p.save(f)
except (SyntaxError, IOError):
print("failed")
else:
print("OK")
| {
"content_hash": "631c78696ff81be8ed853ae6222b1213",
"timestamp": "",
"source": "github",
"line_count": 45,
"max_line_length": 68,
"avg_line_length": 19.08888888888889,
"alnum_prop": 0.5820721769499418,
"repo_name": "Sarah-Alsinan/muypicky",
"id": "0f5df829f71e7a9e0ed433db1da785d1975f845d",
"size": "1076",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "bin/pilfont.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "47175"
},
{
"name": "HTML",
"bytes": "145740"
},
{
"name": "JavaScript",
"bytes": "115482"
},
{
"name": "Python",
"bytes": "7684881"
},
{
"name": "Shell",
"bytes": "3754"
}
],
"symlink_target": ""
} |
from handlers.base import BaseController
from tornado import gen
from model.question.answer import Answer
from model.question.question import Question
from model.question.score import Score
class AnswerHandler(BaseController):
@gen.coroutine
def get(self):
pass
@gen.coroutine
def post(self):
method = self.get_argument('method', '')
if method == "create":
answer = self.get_argument('answer', '')
question = self.get_argument('question', '')
if answer == "":
self.sendError("缺少 answer 参数")
raise gen.Return()
if len(answer) > 3000:
self.sendError("参数过长"); raise gen.Return()
if question == "":
self.sendError("缺少 question 参数"); raise gen.Return()
e_question = yield Question.get_question_by_qid(question)
if e_question == None:
self.sendError("question 不存在"); raise gen.Return()
aid = yield Answer.create_answer(answer, question, self.current_user.user_id)
Score.add_score(20, "answer question "+str(e_question.qid), self.current_user.user_id)
self.sendData(True, "创建成功", {'aid' : aid})
elif method == "delete":
aid = self.get_argument("aid", "")
if aid == "" or not aid.isdigit():
self.sendError("aid 参数错误"); raise gen.Return()
e_answer = yield Answer.get_answer_by_aid(aid)
if not e_answer:
self.sendError("不存在的aid"); raise gen.Return()
if e_answer.author != self.current_user.user_id:
self.sendError("没有权限操作"); raise gen.Return()
yield Answer.delete_answer_by_aid(aid)
Score.add_score(-20, "delete answer "+str(e_answer.aid), self.current_user.user_id)
self.sendData(True, "操作成功", aid)
elif method == "update":
aid = self.get_argument('aid', '')
content = self.get_argument('content', '')
if aid == '' or not aid.isdigit():
self.sendError("aid参数错误"); raise gen.Return()
if content == '' or len(content) > 3000:
self.sendError("content长度不符合"); raise gen.Return()
result = yield Answer.update_answer(aid, content)
self.sendData(True, "操作成功")
else:
self.sendError("未知操作"); raise gen.Return()
| {
"content_hash": "bb0530c537851b010a4c78b89b9c0632",
"timestamp": "",
"source": "github",
"line_count": 75,
"max_line_length": 98,
"avg_line_length": 32.54666666666667,
"alnum_prop": 0.5604260548955347,
"repo_name": "VMatrixTeam/open-matrix",
"id": "9898055a4f9ac83a1e6292678b75bf254bc48327",
"size": "2565",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/webservice/handlers/service/question/answer.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C++",
"bytes": "91"
},
{
"name": "CSS",
"bytes": "224841"
},
{
"name": "HTML",
"bytes": "68428"
},
{
"name": "JavaScript",
"bytes": "733814"
},
{
"name": "Python",
"bytes": "106477"
},
{
"name": "Shell",
"bytes": "48"
}
],
"symlink_target": ""
} |
"""
Auxiliary classes and functions for used by the other AMFM_decompy modules.
Version 1.0.11
23/Jan/2021 Bernardo J.B. Schmitt - [email protected]
"""
import numpy as np
from scipy.signal import lfilter
"""
Creates a signal object.
"""
class SignalObj(object):
def __init__(self, *args, **kwargs):
output_dtype = kwargs.get('output_dtype', 'f')
# Read the signal data from the path of a wav file.
if len(args) == 1 or 'name' in kwargs:
name = args[0] if len(args) == 1 else kwargs['name']
try:
from scipy.io import wavfile
except:
print("ERROR: Wav modules could not loaded!")
raise KeyboardInterrupt
self.fs, self.data = wavfile.read(name)
self.name = name
# Alternatively, read the signal from a Numpy array.
elif len(args) == 2 or all (k in kwargs.keys() for k in ('data','fs')):
data = args[0] if len(args) == 2 else kwargs['data']
fs = args[1] if len(args) == 2 else kwargs['fs']
self.data = data
self.fs = fs
# If the signal data is in the signed integer format (PCM), convert it
# to float.
if self.data.dtype.kind == 'i':
self.nbits = self.data.itemsize*8
self.data = pcm2float(self.data, output_dtype)
self.size = len(self.data)
self.fs = float(self.fs)
# Check if the wav file is stereo.
if self.size == self.data.size/2:
print("Warning: stereo wav file. Converting it to mono for the analysis.")
self.data = (self.data[:,0]+self.data[:,1])/2
"""
Filters the signal data by a bandpass filter object and decimate it.
"""
def filtered_version(self, bp_filter):
# Filter the signal.
tempData = lfilter(bp_filter.b, bp_filter.a, self.data)
# Decimate the filtered output.
self.filtered = tempData[0:self.size:bp_filter.dec_factor]
self.new_fs = self.fs/bp_filter.dec_factor
"""
Method that uses the pitch values to estimate the number of modulated
components in the signal.
"""
def set_nharm(self, pitch_track, n_harm_max):
n_harm = (self.fs/2)/np.amax(pitch_track) - 0.5
self.n_harm = int(np.floor(min(n_harm, n_harm_max)))
"""
Adds a zero-mean gaussian noise to the signal.
"""
def noiser(self, pitch_track, SNR):
self.clean = np.empty((self.size))
self.clean[:] = self.data
RMS = np.std(self.data[pitch_track > 0])
noise = np.random.normal(0, RMS/(10**(SNR/20)), self.size)
self.data += noise
"""
Transform a pcm raw signal into a float one, with values limited between -1 and
1.
"""
def pcm2float(sig, output_dtype=np.float64):
# Make sure it's a NumPy array.
sig = np.asarray(sig)
# Check if it is an array of signed integers.
assert sig.dtype.kind == 'i', "'sig' must be an array of signed integers!"
# Set the array output format. Accepts string as input argument for the
# desired output format (e.g. 'f').
out_dtype = np.dtype(output_dtype)
# Note that 'min' has a greater (by 1) absolute value than 'max'!
# Therefore, we use 'min' here to avoid clipping.
return sig.astype(out_dtype) / out_dtype.type(-np.iinfo(sig.dtype).min)
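# A short usage sketch (the wav path is hypothetical); pitch and AM-FM analysis
# themselves live in the other AMFM_decompy modules:
#
#   signal = SignalObj('path/to/speech.wav')
#   print("fs = %s Hz, %d samples" % (signal.fs, signal.size))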
| {
"content_hash": "24b19503fcdae3a1ea028bbcb5d0508a",
"timestamp": "",
"source": "github",
"line_count": 111,
"max_line_length": 86,
"avg_line_length": 30.46846846846847,
"alnum_prop": 0.5987581312832644,
"repo_name": "bjbschmitt/AMFM_decompy",
"id": "129defe3b1d12ca3a5c185e96c6e2626ee2a3cec",
"size": "3406",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "amfm_decompy/basic_tools.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "70767"
}
],
"symlink_target": ""
} |
import numpy
from prpy.tsr.tsrlibrary import TSRFactory
from prpy.tsr.tsr import *
@TSRFactory('herb', 'plastic_bowl', 'grasp')
def bowl_grasp(robot, bowl, manip=None):
'''
@param robot The robot performing the grasp
@param bowl The bowl to grasp
@param manip The manipulator to perform the grasp,
if None the active manipulator on the robot is used
'''
if manip is None:
manip_idx = robot.GetActiveManipulatorIndex()
else:
with manip.GetRobot():
manip.SetActive()
manip_idx = manip.GetRobot().GetActiveManipulatorIndex()
T0_w = bowl.GetTransform()
Tw_e = numpy.array([[1., 0., 0., 0.],
[0., -1., 0., 0.],
[0., 0., -1., 0.34],
[0., 0., 0., 1.]])
Bw = numpy.zeros((6,2))
    Bw[2,:] = [-0.02, 0.02] # Allow a little vertical movement
Bw[5,:] = [-numpy.pi, numpy.pi] # Allow any orientation
grasp_tsr = TSR(T0_w = T0_w, Tw_e = Tw_e, Bw = Bw, manip = manip_idx)
grasp_chain = TSRChain(sample_start=False, sample_goal = True, constrain=False, TSR = grasp_tsr)
return [grasp_chain]
@TSRFactory('herb', 'plastic_bowl', 'place')
def bowl_on_table(robot, bowl, pose_tsr_chain, manip=None):
'''
Generates end-effector poses for placing the bowl on the table.
This factory assumes the bowl is grasped at the time it is called.
@param robot The robot grasping the bowl
@param bowl The grasped object
@param pose_tsr_chain The tsr chain for sampling placement poses for the bowl
@param manip The manipulator grasping the object, if None the active
manipulator of the robot is used
'''
if manip is None:
manip_idx = robot.GetActiveManipulatorIndex()
manip = robot.GetActiveManipulator()
else:
with manip.GetRobot():
manip.SetActive()
manip_idx = manip.GetRobot().GetActiveManipulatorIndex()
ee_in_bowl = numpy.dot(numpy.linalg.inv(bowl.GetTransform()), manip.GetEndEffectorTransform())
Bw = numpy.zeros((6,2))
Bw[2,:] = [0., 0.08] # Allow some vertical movement
for tsr in pose_tsr_chain.TSRs:
if tsr.manipindex != manip_idx:
raise Exception('pose_tsr_chain defined for a different manipulator.')
grasp_tsr = TSR(Tw_e = ee_in_bowl, Bw = Bw, manip = manip_idx)
all_tsrs = list(pose_tsr_chain.TSRs) + [grasp_tsr]
place_chain = TSRChain(sample_start = False, sample_goal = True, constrain = False,
TSRs = all_tsrs)
return [ place_chain ]
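# Hedged usage sketch: the TSRFactory decorator registers these generators with
# prpy's TSR library, but they can also be called directly, e.g.:
#
#   grasp_chains = bowl_grasp(robot, bowl, manip=robot.right_arm)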
| {
"content_hash": "d59996f32b803e3b37409a852154795d",
"timestamp": "",
"source": "github",
"line_count": 67,
"max_line_length": 100,
"avg_line_length": 38.83582089552239,
"alnum_prop": 0.6149116064565718,
"repo_name": "Shushman/herbpy",
"id": "881de8c5cf5b214e6c8d8deffbc2d62e74706c90",
"size": "2602",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/herbpy/tsr/bowl.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CMake",
"bytes": "2198"
},
{
"name": "Makefile",
"bytes": "41"
},
{
"name": "Python",
"bytes": "104139"
}
],
"symlink_target": ""
} |
"""Support for RSS/Atom feeds."""
from datetime import datetime, timedelta
from logging import getLogger
from os.path import exists
from threading import Lock
import pickle
import voluptuous as vol
import feedparser
from homeassistant.const import EVENT_HOMEASSISTANT_START, CONF_SCAN_INTERVAL
from homeassistant.helpers.event import track_time_interval
import homeassistant.helpers.config_validation as cv
_LOGGER = getLogger(__name__)
CONF_URLS = "urls"
CONF_MAX_ENTRIES = "max_entries"
DEFAULT_MAX_ENTRIES = 20
DEFAULT_SCAN_INTERVAL = timedelta(hours=1)
DOMAIN = "feedreader"
EVENT_FEEDREADER = "feedreader"
CONFIG_SCHEMA = vol.Schema(
{
DOMAIN: {
vol.Required(CONF_URLS): vol.All(cv.ensure_list, [cv.url]),
vol.Optional(
CONF_SCAN_INTERVAL, default=DEFAULT_SCAN_INTERVAL
): cv.time_period,
vol.Optional(
CONF_MAX_ENTRIES, default=DEFAULT_MAX_ENTRIES
): cv.positive_int,
}
},
extra=vol.ALLOW_EXTRA,
)
def setup(hass, config):
"""Set up the Feedreader component."""
urls = config.get(DOMAIN)[CONF_URLS]
scan_interval = config.get(DOMAIN).get(CONF_SCAN_INTERVAL)
max_entries = config.get(DOMAIN).get(CONF_MAX_ENTRIES)
data_file = hass.config.path(f"{DOMAIN}.pickle")
storage = StoredData(data_file)
feeds = [
FeedManager(url, scan_interval, max_entries, hass, storage) for url in urls
]
return len(feeds) > 0
class FeedManager:
"""Abstraction over Feedparser module."""
def __init__(self, url, scan_interval, max_entries, hass, storage):
"""Initialize the FeedManager object, poll as per scan interval."""
self._url = url
self._scan_interval = scan_interval
self._max_entries = max_entries
self._feed = None
self._hass = hass
self._firstrun = True
self._storage = storage
self._last_entry_timestamp = None
self._last_update_successful = False
self._has_published_parsed = False
self._event_type = EVENT_FEEDREADER
self._feed_id = url
hass.bus.listen_once(EVENT_HOMEASSISTANT_START, lambda _: self._update())
self._init_regular_updates(hass)
def _log_no_entries(self):
"""Send no entries log at debug level."""
_LOGGER.debug("No new entries to be published in feed %s", self._url)
def _init_regular_updates(self, hass):
"""Schedule regular updates at the top of the clock."""
track_time_interval(hass, lambda now: self._update(), self._scan_interval)
@property
def last_update_successful(self):
"""Return True if the last feed update was successful."""
return self._last_update_successful
def _update(self):
"""Update the feed and publish new entries to the event bus."""
_LOGGER.info("Fetching new data from feed %s", self._url)
self._feed = feedparser.parse(
self._url,
etag=None if not self._feed else self._feed.get("etag"),
modified=None if not self._feed else self._feed.get("modified"),
)
if not self._feed:
_LOGGER.error("Error fetching feed data from %s", self._url)
self._last_update_successful = False
else:
# The 'bozo' flag really only indicates that there was an issue
# during the initial parsing of the XML, but it doesn't indicate
# whether this is an unrecoverable error. In this case the
# feedparser lib is trying a less strict parsing approach.
# If an error is detected here, log error message but continue
# processing the feed entries if present.
if self._feed.bozo != 0:
_LOGGER.error(
"Error parsing feed %s: %s", self._url, self._feed.bozo_exception
)
# Using etag and modified, if there's no new data available,
# the entries list will be empty
if self._feed.entries:
_LOGGER.debug(
"%s entri(es) available in feed %s",
len(self._feed.entries),
self._url,
)
self._filter_entries()
self._publish_new_entries()
if self._has_published_parsed:
self._storage.put_timestamp(
self._feed_id, self._last_entry_timestamp
)
else:
self._log_no_entries()
self._last_update_successful = True
_LOGGER.info("Fetch from feed %s completed", self._url)
def _filter_entries(self):
"""Filter the entries provided and return the ones to keep."""
if len(self._feed.entries) > self._max_entries:
_LOGGER.debug(
"Processing only the first %s entries " "in feed %s",
self._max_entries,
self._url,
)
self._feed.entries = self._feed.entries[0 : self._max_entries]
def _update_and_fire_entry(self, entry):
"""Update last_entry_timestamp and fire entry."""
# Check if the entry has a published date.
if "published_parsed" in entry.keys() and entry.published_parsed:
# We are lucky, `published_parsed` data available, let's make use of
# it to publish only new available entries since the last run
self._has_published_parsed = True
self._last_entry_timestamp = max(
entry.published_parsed, self._last_entry_timestamp
)
else:
self._has_published_parsed = False
_LOGGER.debug("No published_parsed info available for entry %s", entry)
entry.update({"feed_url": self._url})
self._hass.bus.fire(self._event_type, entry)
def _publish_new_entries(self):
"""Publish new entries to the event bus."""
new_entries = False
self._last_entry_timestamp = self._storage.get_timestamp(self._feed_id)
if self._last_entry_timestamp:
self._firstrun = False
else:
# Set last entry timestamp as epoch time if not available
self._last_entry_timestamp = datetime.utcfromtimestamp(0).timetuple()
for entry in self._feed.entries:
if self._firstrun or (
"published_parsed" in entry.keys()
and entry.published_parsed > self._last_entry_timestamp
):
self._update_and_fire_entry(entry)
new_entries = True
else:
_LOGGER.debug("Entry %s already processed", entry)
if not new_entries:
self._log_no_entries()
self._firstrun = False
class StoredData:
"""Abstraction over pickle data storage."""
def __init__(self, data_file):
"""Initialize pickle data storage."""
self._data_file = data_file
self._lock = Lock()
self._cache_outdated = True
self._data = {}
self._fetch_data()
def _fetch_data(self):
"""Fetch data stored into pickle file."""
if self._cache_outdated and exists(self._data_file):
try:
_LOGGER.debug("Fetching data from file %s", self._data_file)
with self._lock, open(self._data_file, "rb") as myfile:
self._data = pickle.load(myfile) or {}
self._cache_outdated = False
except: # noqa: E722 pylint: disable=bare-except
_LOGGER.error(
"Error loading data from pickled file %s", self._data_file
)
def get_timestamp(self, feed_id):
"""Return stored timestamp for given feed id (usually the url)."""
self._fetch_data()
return self._data.get(feed_id)
def put_timestamp(self, feed_id, timestamp):
"""Update timestamp for given feed id (usually the url)."""
self._fetch_data()
with self._lock, open(self._data_file, "wb") as myfile:
self._data.update({feed_id: timestamp})
_LOGGER.debug(
"Overwriting feed %s timestamp in storage file %s",
feed_id,
self._data_file,
)
try:
pickle.dump(self._data, myfile)
except: # noqa: E722 pylint: disable=bare-except
_LOGGER.error("Error saving pickled data to %s", self._data_file)
self._cache_outdated = True
| {
"content_hash": "0da306f85693ceba288abe0cd62b6666",
"timestamp": "",
"source": "github",
"line_count": 222,
"max_line_length": 85,
"avg_line_length": 38.52252252252252,
"alnum_prop": 0.580799812909261,
"repo_name": "joopert/home-assistant",
"id": "27b164e4edf27e6d3bed6d95956ab97d80bc84e8",
"size": "8552",
"binary": false,
"copies": "2",
"ref": "refs/heads/dev",
"path": "homeassistant/components/feedreader/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "18670593"
},
{
"name": "Shell",
"bytes": "6846"
}
],
"symlink_target": ""
} |
"""Utilities related to searching Elastic."""
from __future__ import absolute_import
from __future__ import print_function
from pprint import pprint
from django.conf import settings
from .indexes import PageIndex, ProjectIndex, SectionIndex
from readthedocs.builds.constants import LATEST
from readthedocs.projects.models import Project
from readthedocs.search.signals import (before_project_search,
before_file_search,
before_section_search)
def search_project(request, query, language=None):
"""Search index for projects matching query"""
body = {
"query": {
"bool": {
"should": [
{"match": {"name": {"query": query, "boost": 10}}},
{"match": {"description": {"query": query}}},
]
},
},
"facets": {
"language": {
"terms": {"field": "lang"},
},
},
"highlight": {
"fields": {
"name": {},
"description": {},
}
},
"fields": ["name", "slug", "description", "lang", "url"],
"size": 50 # TODO: Support pagination.
}
if language:
body['facets']['language']['facet_filter'] = {"term": {"lang": language}}
body['filter'] = {"term": {"lang": language}}
before_project_search.send(request=request, sender=ProjectIndex, body=body)
return ProjectIndex().search(body)
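# Illustrative call (assumptions: a Django request object is available and the
# Elasticsearch index has been built):
#   results = search_project(request, 'sphinx', language='en')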
def search_file(request, query, project_slug=None, version_slug=LATEST, taxonomy=None):
"""Search index for files matching query
Raises a 404 error on missing project
:param request: request instance
:param query: string to query for
:param project_slug: :py:class:`Project` slug
    :param version_slug: slug of the :py:class:`Project` version
:param taxonomy: taxonomy for search
"""
kwargs = {}
body = {
"query": {
"bool": {
"should": [
{"match_phrase": {
"title": {
"query": query,
"boost": 10,
"slop": 2,
},
}},
{"match_phrase": {
"headers": {
"query": query,
"boost": 5,
"slop": 3,
},
}},
{"match_phrase": {
"content": {
"query": query,
"slop": 5,
},
}},
]
}
},
"facets": {
"taxonomy": {
"terms": {"field": "taxonomy"},
},
"project": {
"terms": {"field": "project"},
},
"version": {
"terms": {"field": "version"},
},
},
"highlight": {
"fields": {
"title": {},
"headers": {},
"content": {},
}
},
"fields": ["title", "project", "version", "path"],
"size": 50 # TODO: Support pagination.
}
if project_slug or version_slug or taxonomy:
final_filter = {"and": []}
if project_slug:
try:
project = (Project.objects
.api(request.user)
.get(slug=project_slug))
project_slugs = [project.slug]
# We need to use the obtuse syntax here because the manager
# doesn't pass along to ProjectRelationships
project_slugs.extend(s.slug for s
in Project.objects.public(
request.user).filter(
superprojects__parent__slug=project.slug))
final_filter['and'].append({"terms": {"project": project_slugs}})
# Add routing to optimize search by hitting the right shard.
# This purposely doesn't apply routing if the project has more
# than one parent project.
if project.superprojects.exists():
if project.superprojects.count() == 1:
kwargs['routing'] = (project.superprojects.first()
.parent.slug)
else:
kwargs['routing'] = project_slug
except Project.DoesNotExist:
return None
if version_slug:
final_filter['and'].append({'term': {'version': version_slug}})
if taxonomy:
final_filter['and'].append({'term': {'taxonomy': taxonomy}})
body['filter'] = final_filter
body['facets']['project']['facet_filter'] = final_filter
body['facets']['version']['facet_filter'] = final_filter
body['facets']['taxonomy']['facet_filter'] = final_filter
if settings.DEBUG:
print("Before Signal")
pprint(body)
before_file_search.send(request=request, sender=PageIndex, body=body)
if settings.DEBUG:
print("After Signal")
pprint(body)
return PageIndex().search(body, **kwargs)
def search_section(request, query, project_slug=None, version_slug=LATEST,
path=None):
"""Search for a section of content
When you search, you will have a ``project`` facet, which includes the
number of matching sections per project. When you search inside a project,
the ``path`` facet will show the number of matching sections per page.
:param request: Request instance
:param query: string to use in query
    :param project_slug: :py:class:`Project` slug
    :param version_slug: :py:class:`Project` version slug
    :param path: path to restrict the search to
"""
kwargs = {}
body = {
"query": {
"bool": {
"should": [
{"match_phrase": {
"title": {
"query": query,
"boost": 10,
"slop": 2,
},
}},
{"match_phrase": {
"content": {
"query": query,
"slop": 5,
},
}},
]
}
},
"facets": {
"project": {
"terms": {"field": "project"},
"facet_filter": {
"term": {"version": version_slug},
}
},
},
"highlight": {
"fields": {
"title": {},
"content": {},
}
},
"fields": ["title", "project", "version", "path", "page_id", "content"],
"size": 10 # TODO: Support pagination.
}
if project_slug:
body['filter'] = {
"and": [
{"term": {"project": project_slug}},
{"term": {"version": version_slug}},
]
}
body['facets']['path'] = {
"terms": {"field": "path"},
"facet_filter": {
"term": {"project": project_slug},
}
        }
# Add routing to optimize search by hitting the right shard.
kwargs['routing'] = project_slug
if path:
body['filter'] = {
"and": [
{"term": {"path": path}},
]
}
if path and not project_slug:
# Show facets when we only have a path
body['facets']['path'] = {
"terms": {"field": "path"}
}
before_section_search.send(request=request, sender=PageIndex, body=body)
return SectionIndex().search(body, **kwargs)
| {
"content_hash": "d5964ed72319f8bcba7156749720c3e4",
"timestamp": "",
"source": "github",
"line_count": 248,
"max_line_length": 87,
"avg_line_length": 32.61693548387097,
"alnum_prop": 0.440351094078378,
"repo_name": "pombredanne/readthedocs.org",
"id": "994601032d74620f7305fc854125e11db65f25cc",
"size": "8089",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "readthedocs/search/lib.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "4515"
},
{
"name": "CSS",
"bytes": "66514"
},
{
"name": "HTML",
"bytes": "205587"
},
{
"name": "JavaScript",
"bytes": "444672"
},
{
"name": "Makefile",
"bytes": "4594"
},
{
"name": "Python",
"bytes": "1175310"
}
],
"symlink_target": ""
} |
__version__=''' $Id: test_docs_build.py 3694 2010-04-06 12:54:39Z rgbecker $ '''
"""Tests that all manuals can be built.
"""
from reportlab.lib.testutils import setOutDir,SecureTestCase, printLocation
setOutDir(__name__)
import os, sys, unittest
class ManualTestCase(SecureTestCase):
"Runs all 3 manual-builders from the top."
def test0(self):
"Test if all manuals buildable from source."
from reportlab.lib.testutils import testsFolder
try:
docsFolder = os.path.join(os.path.dirname(testsFolder),'docs')
except:
print testsFolder
raise
cwd = os.getcwd()
os.chdir(docsFolder)
try:
if os.path.isfile('reportlab-userguide.pdf'):
os.remove('reportlab-userguide.pdf')
if os.path.isfile('reportlab-reference.pdf'):
os.remove('reportlab-reference.pdf')
os.system('"%s" genAll.py -s' % sys.executable)
assert os.path.isfile('reportlab-userguide.pdf'), 'genAll.py failed to generate reportlab-userguide.pdf!'
assert os.path.isfile('reportlab-reference.pdf'), 'genAll.py failed to generate reportlab-reference.pdf!'
finally:
os.chdir(cwd)
def makeSuite():
suite = unittest.TestSuite()
loader = unittest.TestLoader()
if sys.platform[:4] != 'java':
suite.addTest(loader.loadTestsFromTestCase(ManualTestCase))
return suite
#noruntests
if __name__ == "__main__":
unittest.TextTestRunner().run(makeSuite())
printLocation()
| {
"content_hash": "ac2f57fdfbb43c3d5ecb73d051c7bdf8",
"timestamp": "",
"source": "github",
"line_count": 44,
"max_line_length": 117,
"avg_line_length": 35.43181818181818,
"alnum_prop": 0.6369467607440668,
"repo_name": "mattjmorrison/ReportLab",
"id": "175587c353fff4143f74900b8a3c717a6758a6f4",
"size": "1559",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "tests/test_docs_build.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "754736"
},
{
"name": "Java",
"bytes": "6333"
},
{
"name": "Python",
"bytes": "3339889"
},
{
"name": "Shell",
"bytes": "1530"
}
],
"symlink_target": ""
} |
from .repocribro import create_app
#: Default Flask app for standard usage (export FLASK_APP)
app = create_app()
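# Illustrative way to serve this app with the Flask CLI (assumes the package
# is importable, e.g. installed or on PYTHONPATH):
#   export FLASK_APP=repocribro.app
#   flask run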
| {
"content_hash": "f78dae5b7c3b514cb8d4b80103436fab",
"timestamp": "",
"source": "github",
"line_count": 4,
"max_line_length": 58,
"avg_line_length": 28.5,
"alnum_prop": 0.7543859649122807,
"repo_name": "MarekSuchanek/repocribro",
"id": "a3ad9285e57855712b72c56bb4fffdb5ce82a381",
"size": "114",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "repocribro/app.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "1996"
},
{
"name": "Dockerfile",
"bytes": "378"
},
{
"name": "HTML",
"bytes": "57114"
},
{
"name": "JavaScript",
"bytes": "1"
},
{
"name": "Mako",
"bytes": "494"
},
{
"name": "Python",
"bytes": "223534"
}
],
"symlink_target": ""
} |
import os
from app import create_app, db
from app.models import users, backhosts, customers,backarchives,config
from flask.ext.script import Manager, Shell
from flask.ext.migrate import Migrate, MigrateCommand
app = create_app(os.getenv('FLASK_CONFIG') or 'production')
manager = Manager(app)
migrate = Migrate(app, db)
def make_shell_context():
return dict(app=app, db=db,users=users,backhosts=backhosts,customers=customers,backarchives=backarchives,config=config)
manager.add_command("shell", Shell(make_context=make_shell_context))
manager.add_command('db', MigrateCommand)
@manager.command
def test():
"""Run the unit tests."""
import unittest
tests = unittest.TestLoader().discover('tests')
unittest.TextTestRunner(verbosity=2).run(tests)
if __name__ == '__main__':
manager.run()
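# Illustrative command-line usage ('shell', 'db' and 'test' are registered
# above; 'runserver' ships with Flask-Script's Manager by default):
#   python manager.py db upgrade
#   python manager.py test
#   python manager.py runserver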
| {
"content_hash": "e83c3b97b929ba4251d5f034e87ab92b",
"timestamp": "",
"source": "github",
"line_count": 26,
"max_line_length": 123,
"avg_line_length": 32.34615384615385,
"alnum_prop": 0.7217598097502973,
"repo_name": "linuxyan/BackManager",
"id": "0e8bbef928cf73652112549573d5b434d0b0ce5f",
"size": "864",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "manager.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "19070"
},
{
"name": "HTML",
"bytes": "33455"
},
{
"name": "JavaScript",
"bytes": "44647"
},
{
"name": "Python",
"bytes": "51600"
}
],
"symlink_target": ""
} |
import sys
import time
args = sys.argv
if args[1:]:
# The 1st argument is taken to be the type
set = args[1]
else:
# default to sample
set = "sample"
secs = time.mktime(time.localtime())
# Override authorization
auth.override = True
session.s3.roles.append(ADMIN)
stylesheet_dir = os.path.join(request.folder, "static", "formats", "s3csv", "supply")
import_dir = os.path.join(request.folder, "private", "templates", "IFRC")
import_file = os.path.join(import_dir, "supply_item_category_ifrc_standard.csv")
stylesheet = os.path.join(stylesheet_dir, "item_category_ifrc_standard.xsl")
resource = s3db.resource("supply_item_category")
File = open(import_file, "r")
resource.import_xml(File,
format="csv",
stylesheet=stylesheet)
File.close()
if set == "sample":
# Sample of 100 Items
import_file = os.path.join(import_dir, "supply_item_ifrc_standard_sample.csv")
elif set == "eic":
# EIC ~3,000 Items
import_file = os.path.join(import_dir, "supply_item_eic.csv")
elif set == "complete":
# Complete ~11,000 Items
import_file = os.path.join(import_dir, "supply_item_ifrc_standard.csv")
stylesheet = os.path.join(stylesheet_dir, "item_ifrc_standard.xsl")
resource = s3db.resource("supply_item")
File = open(import_file, "r")
resource.import_xml(File,
format="csv",
stylesheet=stylesheet)
File.close()
db.commit()
auth.override = False
print "Total Time: %s" % (time.mktime(time.localtime()) - secs)
| {
"content_hash": "a11e922537dbf1899aa025dbfb2e2eee",
"timestamp": "",
"source": "github",
"line_count": 52,
"max_line_length": 85,
"avg_line_length": 29.25,
"alnum_prop": 0.6653517422748192,
"repo_name": "code-for-india/sahana_shelter_worldbank",
"id": "f36dc8f29ddfc33c5757f0f9a1fc0c92433dc0e1",
"size": "1862",
"binary": false,
"copies": "5",
"ref": "refs/heads/hackathon",
"path": "static/scripts/tools/import_supply_item_ifrc_standard.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "1214342"
},
{
"name": "JavaScript",
"bytes": "16755282"
},
{
"name": "PHP",
"bytes": "15220"
},
{
"name": "Perl",
"bytes": "500"
},
{
"name": "Python",
"bytes": "27298931"
},
{
"name": "Shell",
"bytes": "893"
},
{
"name": "XSLT",
"bytes": "2245739"
}
],
"symlink_target": ""
} |
from collections import defaultdict, OrderedDict
from django.contrib.contenttypes.models import ContentType
from apps.people.models import Membership
from apps.merchandise.music.models import Album, Single
def call_attributes(instance, attribute_list):
for attribute in attribute_list:
if hasattr(instance, attribute):
return (getattr(instance, attribute), attribute)
raise AttributeError
def dictify(d):
return {k: dictify(v) for k, v in d.items()} if isinstance(d, defaultdict) else d
def prefetch_relations(queryset):
# Because of the way generic relations work, the foreign keys of any
# content object can't be prefetched. We'll manually prefetch what we
# need for the Membership class and strap that to the existing queryset.
generics = {}
[generics.setdefault(item.content_type_id, set()).add(item.object_id) for item in queryset]
relations = {}
content_types = ContentType.objects.in_bulk(generics.keys())
for ct, fk_list in generics.items():
ct_model = content_types[ct].model_class()
if ct_model is Membership:
relations[ct] = ct_model.objects.select_related('idol', 'group').in_bulk(list(fk_list))
elif ct_model in [Album, Single]:
relations[ct] = ct_model.objects.prefetch_related('participating_idols', 'participating_groups').in_bulk(list(fk_list))
else:
relations[ct] = ct_model.objects.in_bulk(list(fk_list))
[setattr(item, '_content_object_cache', relations[item.content_type_id][item.object_id]) for item in queryset]
return queryset
def regroup_correlations(queryset):
objects = defaultdict(lambda: defaultdict(lambda: defaultdict(list)))
for c in queryset:
objects[c.year][c.month][c.day].append(c)
# Remove the defaultdict-ness from the objects. Then, sort the nested
# dictionaries and then finally the main dictionary--all in reverse.
objects = dictify(objects)
for k, v in objects.iteritems():
objects[k] = OrderedDict(sorted(v.iteritems(), reverse=True))
return OrderedDict(sorted(objects.iteritems(), reverse=True))
def regroup_yearly_correlations(queryset):
objects = defaultdict(lambda: defaultdict(list))
for c in queryset:
objects[c.month][c.day].append(c)
# Remove the defaultdict-ness from the objects. Then, sort the nested
# dictionaries and then finally the main dictionary--all in reverse.
objects = dictify(objects)
for k, v in objects.iteritems():
objects[k] = OrderedDict(sorted(v.iteritems(), reverse=True))
return OrderedDict(sorted(objects.iteritems(), reverse=True))
| {
"content_hash": "3fd1e1a3973a4911470b61c69a53a995",
"timestamp": "",
"source": "github",
"line_count": 65,
"max_line_length": 131,
"avg_line_length": 40.84615384615385,
"alnum_prop": 0.703578154425612,
"repo_name": "hello-base/web",
"id": "35eb881fa0eb9220813c4122dab387ee13066c13",
"size": "2679",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "apps/correlations/utils.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "155440"
},
{
"name": "CoffeeScript",
"bytes": "3290"
},
{
"name": "HTML",
"bytes": "187789"
},
{
"name": "Handlebars",
"bytes": "580"
},
{
"name": "JavaScript",
"bytes": "21286"
},
{
"name": "Python",
"bytes": "345982"
},
{
"name": "Ruby",
"bytes": "352"
}
],
"symlink_target": ""
} |
"""
meerkat_api.py
Root Flask app for the Meerkat API.
"""
from meerkat_api.app import create_app
from meerkat_api.resources import alerts
from meerkat_api.resources import locations
from meerkat_api.resources import frontpage
from meerkat_api.resources import data
from meerkat_api.resources import export_data
from meerkat_api.resources import prescriptions
from meerkat_api.resources import explore
from meerkat_api.resources import variables
from meerkat_api.resources import reports
from meerkat_api.resources import map
from meerkat_api.resources import incidence
from meerkat_api.resources import indicators
from meerkat_api.resources import devices
from meerkat_api.resources import completeness
from meerkat_api.resources import epi_week
app = create_app()
@app.route('/')
def hello_world():
return "WHO"
if __name__ == '__main__':
app.run(host='0.0.0.0', port=5000, debug=True, use_reloader=False)
| {
"content_hash": "9e0cca0f09028bcd7ea9eae0c604906e",
"timestamp": "",
"source": "github",
"line_count": 32,
"max_line_length": 70,
"avg_line_length": 28.8125,
"alnum_prop": 0.789587852494577,
"repo_name": "who-emro/meerkat_api",
"id": "f2e1b437c0d1463b163ae5f823d95ca09035dbe7",
"size": "922",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "meerkat_api/__init__.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "131822"
}
],
"symlink_target": ""
} |
"""
Training in batch
Reading data from a Python function
1. Feed data from a Python generator
2. Batch gradient descent for ridge regression
3. Reusing theta calculated in the previous batch
Gopi Subramanian
13-March-2016
"""
import tensorflow as tf
import numpy as np
from sklearn.datasets import make_regression
batch_size = 500
no_features = 50
no_batches = 10
epochs = 300
"""A Convienient function to return
training data in batch
"""
def train_data():
    # Generate a synthetic regression dataset
sample_size = batch_size * no_batches
X, y = make_regression(n_samples = sample_size, n_features = no_features, n_targets =1 , noise = 0.05)
y = np.array([y]).T
# Add bias term
X = np.column_stack([np.ones(sample_size), X])
return (X, y)
input_data = train_data()
X = input_data[0]
y = input_data[1]
def batch_generator(start, end):
x_batch = X[start:end,:]
y_batch = y[start:end,:]
return (x_batch, y_batch)
# Build the graph
# Input placeholders
x = tf.placeholder(tf.float32, shape = [batch_size, no_features + 1], name = "x")
y_ = tf.placeholder(tf.float32, shape = [batch_size, 1], name = "y_")
# Coeffiecients
theta = tf.Variable(tf.zeros([no_features + 1, 1]), name = "theta" )
alpha = tf.constant(0.001)
# Regression
# y = theta*x
y_pred = tf.matmul(x, theta)
ridge_term = alpha * (tf.reduce_sum(tf.square(theta)))
cost = tf.div( tf.reduce_sum( tf.square( tf.sub ( y_pred, y_ ) ) ) + ridge_term , 2*batch_size ,name = "cost")
rmse = tf.reduce_mean( tf.square ( tf.sub( y_pred, y_) ) )
# Gradient descent learning
learning_rate = 0.1
optimizer = tf.train.GradientDescentOptimizer(learning_rate).minimize(cost)
# intialize variables and begin session
init = tf.initialize_all_variables()
session = tf.Session()
session.run(init)
# Train the model
for i in range(no_batches):
print 'Training batch %d'%(i+1)
    # Slice out the i-th batch of training data
    start = i * batch_size
    end = start + batch_size
    batch = batch_generator(start, end)
feed_dict = {x:batch[0],y_:batch[1]}
old_training_cost = 0
for j in range(epochs):
session.run(optimizer, feed_dict = feed_dict)
training_cost = session.run(cost, feed_dict = feed_dict)
if np.abs(training_cost - old_training_cost) < 0.00001:
print '\tTraining cost at iteration %d is %0.3f'%(j+1, training_cost)
break
old_training_cost = training_cost
print 'Evaluation at batch %d is %0.3f'%(i+1, session.run(rmse, feed_dict = feed_dict ))
# close session
session.close()
| {
"content_hash": "2647652299e85389de621403f04de3f8",
"timestamp": "",
"source": "github",
"line_count": 97,
"max_line_length": 110,
"avg_line_length": 25.824742268041238,
"alnum_prop": 0.67624750499002,
"repo_name": "subramgo/tflow",
"id": "654a4d2e7e8903ce60cabe906a8fb6eb8c22d7c4",
"size": "2505",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "03 IO/03_01_Feeding_Data_Generator.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "19108"
}
],
"symlink_target": ""
} |
__version__='3.3.0'
__doc__="""This modules defines a collection of markers used in charts.
The make* functions return a simple shape or a widget as for
the smiley.
"""
from reportlab.lib import colors
from reportlab.graphics.shapes import Rect, Line, Circle, Polygon
from reportlab.graphics.widgets.signsandsymbols import SmileyFace
def makeEmptySquare(x, y, size, color):
"Make an empty square marker."
d = size/2.0
rect = Rect(x-d, y-d, 2*d, 2*d)
rect.strokeColor = color
rect.fillColor = None
return rect
def makeFilledSquare(x, y, size, color):
"Make a filled square marker."
d = size/2.0
rect = Rect(x-d, y-d, 2*d, 2*d)
rect.strokeColor = color
rect.fillColor = color
return rect
def makeFilledDiamond(x, y, size, color):
"Make a filled diamond marker."
d = size/2.0
poly = Polygon((x-d,y, x,y+d, x+d,y, x,y-d))
poly.strokeColor = color
poly.fillColor = color
return poly
def makeEmptyCircle(x, y, size, color):
"Make a hollow circle marker."
d = size/2.0
circle = Circle(x, y, d)
circle.strokeColor = color
circle.fillColor = colors.white
return circle
def makeFilledCircle(x, y, size, color):
"Make a hollow circle marker."
d = size/2.0
circle = Circle(x, y, d)
circle.strokeColor = color
circle.fillColor = color
return circle
def makeSmiley(x, y, size, color):
"Make a smiley marker."
d = size
s = SmileyFace()
s.fillColor = color
s.x = x-d
s.y = y-d
s.size = d*2
return s
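# Illustrative usage sketch (not part of the original module): each make*
# factory returns a shape or widget that can be added to a Drawing. The
# coordinates, sizes, colors and output file name below are arbitrary.
if __name__ == '__main__':
    from reportlab.graphics.shapes import Drawing
    from reportlab.graphics import renderPDF
    d = Drawing(200, 100)
    d.add(makeFilledCircle(30, 50, 10, colors.red))
    d.add(makeEmptySquare(70, 50, 10, colors.blue))
    d.add(makeFilledDiamond(110, 50, 10, colors.green))
    d.add(makeSmiley(160, 50, 10, colors.yellow))
    renderPDF.drawToFile(d, 'markers_demo.pdf')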
| {
"content_hash": "5d84776a9317aab4dc144fa1e32d4f40",
"timestamp": "",
"source": "github",
"line_count": 78,
"max_line_length": 71,
"avg_line_length": 20.012820512820515,
"alnum_prop": 0.6386931454196029,
"repo_name": "sandeepkoduri/GAE-html-to-pdf",
"id": "f02f6fc6f1b05ec4d8e8913032a584999bd76597",
"size": "1758",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "libs/reportlab/graphics/charts/markers.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "5936"
},
{
"name": "CSS",
"bytes": "6270"
},
{
"name": "JavaScript",
"bytes": "6264"
},
{
"name": "Python",
"bytes": "8032247"
}
],
"symlink_target": ""
} |
'''
@author: Youyk
'''
import zstackwoodpecker.test_util as test_util
import zstackwoodpecker.test_lib as test_lib
import zstackwoodpecker.test_state as test_state
test_stub = test_lib.lib_get_test_stub()
test_obj_dict = test_state.TestStateDict()
def test():
global test_obj_dict
test_util.test_dsc('Create test vm with Vlan SR and check')
vm = test_stub.create_vlan_vm()
test_obj_dict.add_vm(vm)
vm.check()
test_util.test_dsc('Reboot vm and check again')
vm.reboot()
vm.check()
vm.destroy()
test_util.test_pass('Vlan VR VM reboot Test Success')
#Will be called only if exception happens in test().
def error_cleanup():
global test_obj_dict
test_lib.lib_error_cleanup(test_obj_dict)
| {
"content_hash": "0b4333d1dfa0182368842a7183efac47",
"timestamp": "",
"source": "github",
"line_count": 28,
"max_line_length": 63,
"avg_line_length": 27.321428571428573,
"alnum_prop": 0.673202614379085,
"repo_name": "zstackio/zstack-woodpecker",
"id": "1bf7d2b2d01582af17bde017e1d164243a9e441c",
"size": "765",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "integrationtest/vm/virtualrouter/vlan/test_reboot_vm.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "2356"
},
{
"name": "Go",
"bytes": "49822"
},
{
"name": "Makefile",
"bytes": "687"
},
{
"name": "Puppet",
"bytes": "875"
},
{
"name": "Python",
"bytes": "13070596"
},
{
"name": "Shell",
"bytes": "177861"
}
],
"symlink_target": ""
} |
class Element:
def __init__(self, key, value, pre=None, next=None):
self.key = key
self.value = value
self.pre = pre
self.next = next
class LRUCache:
# @param capacity, an integer
def __init__(self, capacity):
self.capacity = capacity
self.mapping = {}
self.head = None
self.tail = None
self.size = 0
def append_to_head(self, node):
if not self.head:
self.head = node
self.tail = node
return
node.next = self.head
self.head.pre = node
self.head = node
def del_tail(self):
self.tail = self.tail.pre
self.mapping.pop(self.tail.next.key)
self.tail.next = None
def move_to_head(self, node):
if node is self.head:
return
if node is self.tail:
self.tail = self.tail.pre
self.tail.next = None
node.pre = None
node.next = self.head
# do not forget!
self.head.pre = node
self.head = node
return
node.pre.next = node.next
node.next.pre = node.pre
node.pre = None
node.next = self.head
# do not forget!
self.head.pre = node
self.head = node
return
# @return an integer
def get(self, key):
if key in self.mapping:
node = self.mapping[key]
self.move_to_head(node)
return node.value
else:
return -1
# @param key, an integer
# @param value, an integer
# @return nothing
def set(self, key, value):
if key in self.mapping:
node = self.mapping[key]
node.value = value
self.move_to_head(node)
else:
node = Element(key, value)
self.size += 1
self.mapping[key] = node
self.append_to_head(node)
if self.size > self.capacity:
self.del_tail()
                self.size -= 1
| {
"content_hash": "6e3cdbf756901ec5dfdadba94ab27843",
"timestamp": "",
"source": "github",
"line_count": 76,
"max_line_length": 56,
"avg_line_length": 26.93421052631579,
"alnum_prop": 0.5007327796775769,
"repo_name": "pikeszfish/Leetcode.py",
"id": "9561f77432ccc5b1242353b257969084c0133586",
"size": "2047",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "leetcode.py/LRUCache.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "22791"
}
],
"symlink_target": ""
} |
carPrice = int(input("Enter the price of a car (in dollars only, no cents): "))
tax = 0.07
fees = 0.03
hiddenFees = 0.01
grandTotal = int(carPrice*tax) + int(carPrice*fees) + int(carPrice*hiddenFees) + carPrice
print("\nTax is: ", int(carPrice*tax))
print("fees are: ", int(carPrice*fees))
print("Hidden fees are: ", int(carPrice*hiddenFees))
print("\n\nGrand Total is: ", grandTotal)
input("\n\nPress enter to exit")
| {
"content_hash": "b434c2625ca26b72da94d8187dac023c",
"timestamp": "",
"source": "github",
"line_count": 14,
"max_line_length": 89,
"avg_line_length": 30,
"alnum_prop": 0.6976190476190476,
"repo_name": "Ry09/Python-projects",
"id": "1d20ffa7c76766ee33103edd93a5358de084a860",
"size": "654",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Programs & Challenges from Python for beginners book/Chapter 2/ch2_challenge4.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "13815"
}
],
"symlink_target": ""
} |
"""Test for the ee.element module."""
import unittest
import ee
from ee import apitestcase
class ElementTestCase(apitestcase.ApiTestCase):
def testSet(self):
"""Verifies Element.set() keyword argument interpretation."""
image = ee.Image(1)
# Constant dictionary.
def AssertProperties(expected, image):
properties = {}
while image.func == ee.ApiFunction.lookup('Element.set'):
key = image.args['key']
if not isinstance(key, basestring):
key = key.encode()
properties[key] = image.args['value']
image = image.args['object']
self.assertEquals(ee.Image(1), image)
self.assertEquals(expected, properties)
AssertProperties({'foo': 'bar'}, image.set({'foo': 'bar'}))
AssertProperties({'foo': 'bar'}, image.set({'properties': {'foo': 'bar'}}))
AssertProperties({'properties': 5}, image.set({'properties': 5}))
AssertProperties({'properties': {'foo': 'bar'}, 'baz': 'quux'},
image.set({'properties': {'foo': 'bar'}, 'baz': 'quux'}))
AssertProperties({'foo': 'bar', 'baz': 'quux'},
image.set('foo', 'bar', 'baz', 'quux'))
# Computed dictionary.
computed_arg = ee.ComputedObject(None, None, 'foo')
def CheckMultiProperties(result):
self.assertEquals(ee.ApiFunction.lookup('Element.setMulti'), result.func)
self.assertEquals(
{'object': image, 'properties': ee.Dictionary(computed_arg)},
result.args)
CheckMultiProperties(image.set(computed_arg))
CheckMultiProperties(image.set({'properties': computed_arg}))
if __name__ == '__main__':
unittest.main()
| {
"content_hash": "dad1b97088a3767a8a7d0b7eeb38ee41",
"timestamp": "",
"source": "github",
"line_count": 50,
"max_line_length": 79,
"avg_line_length": 33.06,
"alnum_prop": 0.6212946158499697,
"repo_name": "wemanuel/smry",
"id": "2039c2f5bcc412d30327f7a51c2c9c2627d20466",
"size": "1675",
"binary": false,
"copies": "9",
"ref": "refs/heads/master",
"path": "smry/server-auth/ee/tests/element_test.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "3990"
},
{
"name": "Groff",
"bytes": "1221174"
},
{
"name": "HTML",
"bytes": "1873470"
},
{
"name": "JavaScript",
"bytes": "2192"
},
{
"name": "Makefile",
"bytes": "6032"
},
{
"name": "PHP",
"bytes": "16660"
},
{
"name": "Python",
"bytes": "47139164"
},
{
"name": "Shell",
"bytes": "37102"
},
{
"name": "SourcePawn",
"bytes": "1160"
}
],
"symlink_target": ""
} |
"""
Support for interacting with Snapcast clients.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/media_player.snapcast/
"""
import logging
import socket
import voluptuous as vol
from homeassistant.components.media_player import (
DOMAIN, PLATFORM_SCHEMA, SUPPORT_SELECT_SOURCE, SUPPORT_VOLUME_MUTE,
SUPPORT_VOLUME_SET, MediaPlayerDevice)
from homeassistant.const import (
ATTR_ENTITY_ID, CONF_HOST, CONF_PORT, STATE_IDLE, STATE_OFF, STATE_ON,
STATE_PLAYING, STATE_UNKNOWN)
import homeassistant.helpers.config_validation as cv
REQUIREMENTS = ['snapcast==2.0.9']
_LOGGER = logging.getLogger(__name__)
DATA_KEY = 'snapcast'
SERVICE_SNAPSHOT = 'snapcast_snapshot'
SERVICE_RESTORE = 'snapcast_restore'
SUPPORT_SNAPCAST_CLIENT = SUPPORT_VOLUME_MUTE | SUPPORT_VOLUME_SET
SUPPORT_SNAPCAST_GROUP = SUPPORT_VOLUME_MUTE | SUPPORT_VOLUME_SET |\
SUPPORT_SELECT_SOURCE
GROUP_PREFIX = 'snapcast_group_'
GROUP_SUFFIX = 'Snapcast Group'
CLIENT_PREFIX = 'snapcast_client_'
CLIENT_SUFFIX = 'Snapcast Client'
SERVICE_SCHEMA = vol.Schema({
ATTR_ENTITY_ID: cv.entity_ids,
})
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
vol.Required(CONF_HOST): cv.string,
vol.Optional(CONF_PORT): cv.port,
})
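# A minimal configuration.yaml entry matching the platform schema above
# (illustrative; the host is a placeholder and the port falls back to
# snapcast's CONTROL_PORT when omitted):
#
# media_player:
#   - platform: snapcast
#     host: 192.168.1.10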
async def async_setup_platform(hass, config, async_add_entities,
discovery_info=None):
"""Set up the Snapcast platform."""
import snapcast.control
from snapcast.control.server import CONTROL_PORT
host = config.get(CONF_HOST)
port = config.get(CONF_PORT, CONTROL_PORT)
async def _handle_service(service):
"""Handle services."""
entity_ids = service.data.get(ATTR_ENTITY_ID)
devices = [device for device in hass.data[DATA_KEY]
if device.entity_id in entity_ids]
for device in devices:
if service.service == SERVICE_SNAPSHOT:
device.snapshot()
elif service.service == SERVICE_RESTORE:
await device.async_restore()
hass.services.async_register(
DOMAIN, SERVICE_SNAPSHOT, _handle_service, schema=SERVICE_SCHEMA)
hass.services.async_register(
DOMAIN, SERVICE_RESTORE, _handle_service, schema=SERVICE_SCHEMA)
try:
server = await snapcast.control.create_server(
hass.loop, host, port, reconnect=True)
except socket.gaierror:
_LOGGER.error("Could not connect to Snapcast server at %s:%d",
host, port)
return
    # Note: the host part is needed when using multiple snapservers
hpid = '{}:{}'.format(host, port)
groups = [SnapcastGroupDevice(group, hpid) for group in server.groups]
clients = [SnapcastClientDevice(client, hpid) for client in server.clients]
devices = groups + clients
hass.data[DATA_KEY] = devices
async_add_entities(devices)
class SnapcastGroupDevice(MediaPlayerDevice):
"""Representation of a Snapcast group device."""
def __init__(self, group, uid_part):
"""Initialize the Snapcast group device."""
group.set_callback(self.schedule_update_ha_state)
self._group = group
self._uid = '{}{}_{}'.format(GROUP_PREFIX, uid_part,
self._group.identifier)
@property
def state(self):
"""Return the state of the player."""
return {
'idle': STATE_IDLE,
'playing': STATE_PLAYING,
'unknown': STATE_UNKNOWN,
}.get(self._group.stream_status, STATE_UNKNOWN)
@property
def unique_id(self):
"""Return the ID of snapcast group."""
return self._uid
@property
def name(self):
"""Return the name of the device."""
return '{}{}'.format(GROUP_PREFIX, self._group.identifier)
@property
def source(self):
"""Return the current input source."""
return self._group.stream
@property
def volume_level(self):
"""Return the volume level."""
return self._group.volume / 100
@property
def is_volume_muted(self):
"""Volume muted."""
return self._group.muted
@property
def supported_features(self):
"""Flag media player features that are supported."""
return SUPPORT_SNAPCAST_GROUP
@property
def source_list(self):
"""List of available input sources."""
return list(self._group.streams_by_name().keys())
@property
def device_state_attributes(self):
"""Return the state attributes."""
name = '{} {}'.format(self._group.friendly_name, GROUP_SUFFIX)
return {
'friendly_name': name
}
@property
def should_poll(self):
"""Do not poll for state."""
return False
async def async_select_source(self, source):
"""Set input source."""
streams = self._group.streams_by_name()
if source in streams:
await self._group.set_stream(streams[source].identifier)
self.async_schedule_update_ha_state()
async def async_mute_volume(self, mute):
"""Send the mute command."""
await self._group.set_muted(mute)
self.async_schedule_update_ha_state()
async def async_set_volume_level(self, volume):
"""Set the volume level."""
await self._group.set_volume(round(volume * 100))
self.async_schedule_update_ha_state()
def snapshot(self):
"""Snapshot the group state."""
self._group.snapshot()
async def async_restore(self):
"""Restore the group state."""
await self._group.restore()
class SnapcastClientDevice(MediaPlayerDevice):
"""Representation of a Snapcast client device."""
def __init__(self, client, uid_part):
"""Initialize the Snapcast client device."""
client.set_callback(self.schedule_update_ha_state)
self._client = client
self._uid = '{}{}_{}'.format(CLIENT_PREFIX, uid_part,
self._client.identifier)
@property
def unique_id(self):
"""
Return the ID of this snapcast client.
        Note: the host part is needed when using multiple snapservers
"""
return self._uid
@property
def name(self):
"""Return the name of the device."""
return '{}{}'.format(CLIENT_PREFIX, self._client.identifier)
@property
def volume_level(self):
"""Return the volume level."""
return self._client.volume / 100
@property
def is_volume_muted(self):
"""Volume muted."""
return self._client.muted
@property
def supported_features(self):
"""Flag media player features that are supported."""
return SUPPORT_SNAPCAST_CLIENT
@property
def state(self):
"""Return the state of the player."""
if self._client.connected:
return STATE_ON
return STATE_OFF
@property
def device_state_attributes(self):
"""Return the state attributes."""
name = '{} {}'.format(self._client.friendly_name, CLIENT_SUFFIX)
return {
'friendly_name': name
}
@property
def should_poll(self):
"""Do not poll for state."""
return False
async def async_mute_volume(self, mute):
"""Send the mute command."""
await self._client.set_muted(mute)
self.async_schedule_update_ha_state()
async def async_set_volume_level(self, volume):
"""Set the volume level."""
await self._client.set_volume(round(volume * 100))
self.async_schedule_update_ha_state()
def snapshot(self):
"""Snapshot the client state."""
self._client.snapshot()
async def async_restore(self):
"""Restore the client state."""
await self._client.restore()
| {
"content_hash": "478d33df250ad926c98d05fb44f46918",
"timestamp": "",
"source": "github",
"line_count": 258,
"max_line_length": 79,
"avg_line_length": 30.46511627906977,
"alnum_prop": 0.6208651399491094,
"repo_name": "tinloaf/home-assistant",
"id": "cfe2f997295580b4bd563aa0b472fdd10cd4bed3",
"size": "7860",
"binary": false,
"copies": "1",
"ref": "refs/heads/dev",
"path": "homeassistant/components/media_player/snapcast.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "1175"
},
{
"name": "Dockerfile",
"bytes": "1099"
},
{
"name": "Python",
"bytes": "13135313"
},
{
"name": "Ruby",
"bytes": "745"
},
{
"name": "Shell",
"bytes": "17137"
}
],
"symlink_target": ""
} |
"""
:synopsis: Define the specialize script classes that will generate the script code.
"""
from .base import AbstractScript
class BashScript(AbstractScript):
"""Extended `AbstractScript` class for Bash script code generation.
Fills code variables for the request from `bash_template`.
Overrides `_generate_request` method to generate bash specific code.
"""
__language__ = 'bash'
def _generate_request(self):
code = self.code_nosearch.format(
method=self.details.get('method', ''),
url=self.url,
headers=self._generate_headers())
if self.search:
code += self.code_search.format(search_string=self.search.replace('"', '\\"'))
return code
class PHPScript(AbstractScript):
"""Extended `AbstractScript` class for PHP script code generation.
Fills code variables for the request from `php_template`.
Overrides `_generate_begin` method to generate php specific code.
"""
__language__ = 'php'
def _generate_begin(self):
return self.code_begin.format(url=self.url) + self._generate_headers()
class PythonScript(AbstractScript):
"""Extended `AbstractScript` class for Python script code generation.
Fills code variables for the request from `python_template`.
Overrides `_generate_begin` method to generate python specific code.
"""
__language__ = 'python'
def _generate_begin(self):
return self.code_begin.format(url=self.url, headers=str(self.headers))
class RubyScript(AbstractScript):
"""Extended `AbstractScript` class for Ruby script code generation.
Fills code variables for the request from `ruby_template`.
Overrides `_generate_begin` method to generate Ruby specific code.
"""
__language__ = 'ruby'
def _generate_begin(self):
code = self.code_begin.format(url=self.url, method=self.details.get('method', '').strip().lower())
code += self.code_headers.format(headers=self._generate_headers())
return code
| {
"content_hash": "96e207f6d47974bac1a046885b71adaa",
"timestamp": "",
"source": "github",
"line_count": 67,
"max_line_length": 106,
"avg_line_length": 30.402985074626866,
"alnum_prop": 0.6740304369170349,
"repo_name": "dhruvagarwal/http-request-translator",
"id": "2253b0b853182562cb4221c3b421882a6a85df17",
"size": "2037",
"binary": false,
"copies": "1",
"ref": "refs/heads/dev",
"path": "hrt/script.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Makefile",
"bytes": "238"
},
{
"name": "Python",
"bytes": "70046"
}
],
"symlink_target": ""
} |
'''
Created by auto_sdk on 2014.11.04
'''
from top.api.base import RestApi
class WlbTradeorderGetRequest(RestApi):
def __init__(self,domain='gw.api.taobao.com',port=80):
RestApi.__init__(self,domain, port)
self.sub_trade_id = None
self.trade_id = None
self.trade_type = None
def getapiname(self):
return 'taobao.wlb.tradeorder.get'
| {
"content_hash": "1bbb84e33e8fbc9e6e80484bd213bae7",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 55,
"avg_line_length": 27.53846153846154,
"alnum_prop": 0.6843575418994413,
"repo_name": "colaftc/webtool",
"id": "76ecf8c4a1cb1bef122fa9bb905333252c4c1f6f",
"size": "358",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "top/api/rest/WlbTradeorderGetRequest.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "12208"
},
{
"name": "HTML",
"bytes": "16773"
},
{
"name": "JavaScript",
"bytes": "2571"
},
{
"name": "Mako",
"bytes": "412"
},
{
"name": "Python",
"bytes": "258023"
},
{
"name": "Ruby",
"bytes": "861"
},
{
"name": "VimL",
"bytes": "401921"
}
],
"symlink_target": ""
} |
from .base import Base
from .list import List
from .matrix import Matrix
from .tree import *
| {
"content_hash": "e9d7bc55cf5ef5970866143e0d149308",
"timestamp": "",
"source": "github",
"line_count": 4,
"max_line_length": 26,
"avg_line_length": 23.25,
"alnum_prop": 0.7741935483870968,
"repo_name": "zlsun/VisualAlgorithm",
"id": "202689bdb6633408db8398a1e2c649395fb24605",
"size": "94",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/structures/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "309"
},
{
"name": "Makefile",
"bytes": "384"
},
{
"name": "Python",
"bytes": "37411"
}
],
"symlink_target": ""
} |
from pandas import DataFrame
import numpy as np
import nltk
from collections import Counter
from collections import OrderedDict
from sklearn.feature_extraction.text import TfidfVectorizer
def extract_sim_words(model, brand, result_path, freq_dist, min_count, save=True, topn=20):
df = DataFrame(columns=[['word', 'sim', 'freq']])
result = model.most_similar([model.docvecs[brand]], topn=topn)
if save:
for tup in result:
if freq_dist[tup[0]] >= min_count:
df.loc[len(df)] = [tup[0], tup[1], freq_dist[tup[0]]]
df.to_csv(result_path + 'keywords/' + brand + "_sim_words.csv", index=False)
return
else:
for tup in result:
if freq_dist[tup[0]] >= min_count:
df.loc[len(df)] = [tup[0], tup[1], freq_dist[tup[0]]]
return df
def extract_sim_brand(model, brand, result_path, save=True, topn=20):
df = DataFrame(columns=[['word', 'sim']])
result = model.docvecs.most_similar(brand, topn=topn)
if save:
for tup in result:
df.loc[len(df)] = [tup[0], tup[1]]
df.to_csv(result_path + 'keywords/' + brand + "_sim_brands.csv", index=False)
return
else:
for tup in result:
df.loc[len(df)] = [tup[0], tup[1]]
return df
def cal_mean_cluster(df_result, cluster_idx, doc2vec_model, group_name='Cluster'):
df = df_result[df_result[group_name] == cluster_idx]
names = list(df['Name'].unique())
all_arr = np.zeros((doc2vec_model.vector_size, len(names)))
for index, name in enumerate(names):
all_arr[:, index] = doc2vec_model.docvecs[name]
return all_arr.mean(axis=1)
def print_result(vector, model, freq_dist, min_count, topn=50):
df = DataFrame(columns=[['word','cos','freq']])
lst = model.most_similar([vector], topn=topn)
for tup in lst:
if freq_dist[tup[0]] >= min_count:
df.loc[len(df)] = [tup[0], tup[1], freq_dist[tup[0]]]
return df
def save_brand_sim(model, sum_vector, name, save_path, topn=20):
df = DataFrame(columns=('brand','sim'))
lst = model.docvecs.most_similar([sum_vector], topn=topn)
for tup in lst:
df.loc[len(df)] = [tup[0], tup[1]]
df.to_csv(save_path + name + '_simBrands.csv', index=False)
return
# Word frequency distribution for each brand
def brand_raw_freq(documents, brand):
brand_review = []
for index, doc in enumerate(documents):
if doc.tags[0] == brand:
brand_review.append(doc.words)
brand_review = [word for sent in brand_review for word in sent]
corpus = nltk.Text(brand_review)
freq = nltk.FreqDist(corpus)
return brand_review, freq
def extract_keywords(score_df, brand, documents, selected, path, min_count = 100):
keywords = score_df[['word',brand]].sort_values(brand, ascending=False)
keywords.reset_index(inplace=True, drop=True)
review, freq = brand_freq(documents, selected, brand)
keyword_count = []
df = DataFrame(columns=[["단어","확률유사도","빈도"]])
for index, row in keywords.iterrows():
if freq[row['word']] >= min_count:
df.loc[len(df)] = [row['word'], row[brand], freq[row['word']]]
df.to_csv(path + '/keywords/' + brand + '_Keywords.csv', index=False)
def brand_freq(documents, selected_words, brand):
brand_review = []
for index, doc in enumerate(documents):
if doc.tags[0] == brand:
brand_review.append(selected_words[index])
brand_review = [word for sent in brand_review for word in sent]
corpus = nltk.Text(brand_review)
freq = nltk.FreqDist(corpus)
return brand_review, freq
def clustering(model):
brand_list = list(model.docvecs.doctags.keys())
hidden_size = model.vector_size
print("num of securities : %s, num of dimension : %s" % (len(brand_list), hidden_size))
doc_arr = np.zeros((len(brand_list), hidden_size))
for index, name in enumerate(brand_list):
doc_arr[index, :] = model.docvecs[name]
return brand_list, doc_arr
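# Illustrative follow-up (assumption: scikit-learn is available); the array
# returned by clustering() can be fed straight into a clustering algorithm:
#   from sklearn.cluster import KMeans
#   brands, arr = clustering(doc2vec_model)
#   labels = KMeans(n_clusters=5).fit_predict(arr)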
def tf_idf(documents, selected_words, brand_list, max_feature = 5000):
total_freq = Counter()
corpus = []
for brand in brand_list:
review, freq = brand_freq(documents, selected_words, brand)
total_freq += freq
doc = ' '.join(review)
corpus.append(doc)
total_freq = OrderedDict(sorted(total_freq.items(), key=lambda t: -t[1]))
vectorizer = TfidfVectorizer(max_features=max_feature)
tfidf_arr = vectorizer.fit_transform(corpus).toarray()
col_name = vectorizer.get_feature_names()
df_tfidf = DataFrame(columns=[col_name])
for i in range(len(brand_list)):
df_tfidf.loc[len(df_tfidf)] = tfidf_arr[i]
    df_tfidf.set_index([brand_list], inplace=True) # use brand names as the index
return df_tfidf
def softmax(x):
e = np.exp(x - np.max(x,axis=0,keepdims=True))
x = e / np.sum(e,axis=0,keepdims=True)
return x
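# Illustrative sanity check: softmax normalises over axis 0, so each column
# sums to 1. For example (values rounded):
#   softmax(np.array([[1.], [2.], [3.]])) -> [[0.09], [0.245], [0.665]]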
def scoring(model, brand_list, selected, topn=2000):
    # Loop over the brand names and stack their document vectors into X
embedding_size = model.vector_size
brands_size = len(brand_list)
X = np.zeros((brands_size, embedding_size))
for i, brand in enumerate(brand_list):
X[i] = model.docvecs[brand].flatten()
    # Top verbs, adverbs, and adjectives (stopwords included)
text = [word for sent in selected for word in sent]
corpus = nltk.Text(text)
freq = nltk.FreqDist(corpus)
top_freq_words = freq.most_common(topn)
B = X
m = len(B)
df_score = DataFrame(columns=[['word'] + brand_list])
for i, top in enumerate(top_freq_words):
        w = model[top[0]] # word vector
x = np.dot(B, w).reshape((m, 1))
p = softmax(x)
# print p.T[0]
row = [top[0]] + p.T[0].tolist()
df_score.loc[i] = row
return df_score
def extract_words_by_score(df_score, brand, documents, selected, min_count = 100):
keywords = df_score[['word',brand]].sort_values(brand, ascending=False)
keywords.reset_index(inplace=True, drop=True)
review, freq = brand_freq(documents, selected, brand)
df = DataFrame(columns=[["단어","확률유사도","빈도"]])
for index, row in keywords.iterrows():
if freq[row['word']] >= min_count:
df.loc[len(df)] = [row['word'], row[brand], freq[row['word']]]
    return df
| {
"content_hash": "a4e7a34ba7338b15088b5f3cd2be7cdc",
"timestamp": "",
"source": "github",
"line_count": 171,
"max_line_length": 91,
"avg_line_length": 36.42690058479532,
"alnum_prop": 0.6217691443249318,
"repo_name": "dedert/Brand2Vec",
"id": "80c95f667bceaae16770ace602335044413b9c6c",
"size": "6373",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "modeling/functions.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "425911"
},
{
"name": "Python",
"bytes": "26174"
}
],
"symlink_target": ""
} |
"""
Convenience functions for the construction of spatial weights based on
contiguity and distance criteria.
"""
__author__ = "Sergio J. Rey <[email protected]> "
from Contiguity import buildContiguity, Queen, Rook
from Distance import knnW, Kernel, DistanceBand
from util import get_ids, get_points_array_from_shapefile, min_threshold_distance
from ..io.FileIO import FileIO as ps_open
from .. import cg
from weights import WSP
import numpy as np
__all__ = ['queen_from_shapefile', 'rook_from_shapefile', 'knnW_from_array',
'knnW_from_shapefile', 'threshold_binaryW_from_array',
'threshold_binaryW_from_shapefile', 'threshold_continuousW_from_array',
'threshold_continuousW_from_shapefile', 'kernelW', 'kernelW_from_shapefile',
'adaptive_kernelW', 'adaptive_kernelW_from_shapefile',
'min_threshold_dist_from_shapefile', 'build_lattice_shapefile']
def queen_from_shapefile(shapefile, idVariable=None, sparse=False):
"""
Queen contiguity weights from a polygon shapefile.
Parameters
----------
shapefile : string
name of polygon shapefile including suffix.
idVariable : string
name of a column in the shapefile's DBF to use for ids.
sparse : boolean
If True return WSP instance
If False return W instance
Returns
-------
w : W
instance of spatial weights
Examples
--------
>>> wq=queen_from_shapefile(pysal.examples.get_path("columbus.shp"))
>>> "%.3f"%wq.pct_nonzero
'9.829'
>>> wq=queen_from_shapefile(pysal.examples.get_path("columbus.shp"),"POLYID")
>>> "%.3f"%wq.pct_nonzero
'9.829'
>>> wq=queen_from_shapefile(pysal.examples.get_path("columbus.shp"), sparse=True)
>>> pct_sp = wq.sparse.nnz *1. / wq.n**2
>>> "%.3f"%pct_sp
'0.098'
Notes
-----
Queen contiguity defines as neighbors any pair of polygons that share at
least one vertex in their polygon definitions.
See Also
--------
:class:`pysal.weights.W`
"""
w = Queen.from_shapefile(shapefile, idVariable=idVariable)
if sparse:
w = WSP(w.sparse, id_order=w.id_order)
return w
def rook_from_shapefile(shapefile, idVariable=None, sparse=False):
"""
Rook contiguity weights from a polygon shapefile.
Parameters
----------
shapefile : string
name of polygon shapefile including suffix.
idVariable: string
name of a column in the shapefile's DBF to use for ids
sparse : boolean
If True return WSP instance
If False return W instance
Returns
-------
w : W
instance of spatial weights
Examples
--------
>>> wr=rook_from_shapefile(pysal.examples.get_path("columbus.shp"), "POLYID")
>>> "%.3f"%wr.pct_nonzero
'8.330'
>>> wr=rook_from_shapefile(pysal.examples.get_path("columbus.shp"), sparse=True)
>>> pct_sp = wr.sparse.nnz *1. / wr.n**2
>>> "%.3f"%pct_sp
'0.083'
Notes
-----
Rook contiguity defines as neighbors any pair of polygons that share a
common edge in their polygon definitions.
See Also
--------
:class:`pysal.weights.W`
"""
w = Rook.from_shapefile(shapefile, idVariable=idVariable)
if sparse:
w = WSP(w.sparse, id_order=w.id_order)
return w
def spw_from_gal(galfile):
"""
Sparse scipy matrix for w from a gal file.
Parameters
----------
galfile : string
name of gal file including suffix
Returns
-------
spw : sparse_matrix
scipy sparse matrix in CSR format
ids : array
identifiers for rows/cols of spw
Examples
--------
>>> spw = pysal.weights.user.spw_from_gal(pysal.examples.get_path("sids2.gal"))
>>> spw.sparse.nnz
462
"""
return ps_open(galfile, 'r').read(sparse=True)
# Distance based weights
def knnW_from_array(array, k=2, p=2, ids=None, radius=None):
"""
Nearest neighbor weights from a numpy array.
Parameters
----------
data : array
(n,m)
attribute data, n observations on m attributes
k : int
number of nearest neighbors
p : float
Minkowski p-norm distance metric parameter:
1<=p<=infinity
2: Euclidean distance
1: Manhattan distance
ids : list
identifiers to attach to each observation
radius : float
If supplied arc_distances will be calculated
based on the given radius. p will be ignored.
Returns
-------
w : W
instance; Weights object with binary weights.
Examples
--------
>>> import numpy as np
>>> x,y=np.indices((5,5))
>>> x.shape=(25,1)
>>> y.shape=(25,1)
>>> data=np.hstack([x,y])
>>> wnn2=knnW_from_array(data,k=2)
>>> wnn4=knnW_from_array(data,k=4)
>>> set([1, 5, 6, 2]) == set(wnn4.neighbors[0])
True
>>> set([0, 1, 10, 6]) == set(wnn4.neighbors[5])
True
>>> set([1, 5]) == set(wnn2.neighbors[0])
True
>>> set([0,6]) == set(wnn2.neighbors[5])
True
>>> "%.2f"%wnn2.pct_nonzero
'8.00'
>>> wnn4.pct_nonzero
16.0
>>> wnn4=knnW_from_array(data,k=4)
>>> set([ 1,5,6,2]) == set(wnn4.neighbors[0])
True
Notes
-----
Ties between neighbors of equal distance are arbitrarily broken.
See Also
--------
:class:`pysal.weights.W`
"""
if radius is not None:
kdtree = cg.KDTree(array, distance_metric='Arc', radius=radius)
else:
kdtree = cg.KDTree(array)
return knnW(kdtree, k=k, p=p, ids=ids)
def knnW_from_shapefile(shapefile, k=2, p=2, idVariable=None, radius=None):
"""
Nearest neighbor weights from a shapefile.
Parameters
----------
shapefile : string
shapefile name with shp suffix
k : int
number of nearest neighbors
p : float
Minkowski p-norm distance metric parameter:
1<=p<=infinity
2: Euclidean distance
1: Manhattan distance
idVariable : string
name of a column in the shapefile's DBF to use for ids
radius : float
If supplied arc_distances will be calculated
based on the given radius. p will be ignored.
Returns
-------
w : W
instance; Weights object with binary weights
Examples
--------
Polygon shapefile
>>> wc=knnW_from_shapefile(pysal.examples.get_path("columbus.shp"))
>>> "%.4f"%wc.pct_nonzero
'4.0816'
>>> set([2,1]) == set(wc.neighbors[0])
True
>>> wc3=pysal.knnW_from_shapefile(pysal.examples.get_path("columbus.shp"),k=3)
>>> set(wc3.neighbors[0]) == set([2,1,3])
True
>>> set(wc3.neighbors[2]) == set([4,3,0])
True
1 offset rather than 0 offset
>>> wc3_1=knnW_from_shapefile(pysal.examples.get_path("columbus.shp"),k=3,idVariable="POLYID")
>>> set([4,3,2]) == set(wc3_1.neighbors[1])
True
>>> wc3_1.weights[2]
[1.0, 1.0, 1.0]
>>> set([4,1,8]) == set(wc3_1.neighbors[2])
True
Point shapefile
>>> w=knnW_from_shapefile(pysal.examples.get_path("juvenile.shp"))
>>> w.pct_nonzero
1.1904761904761905
>>> w1=knnW_from_shapefile(pysal.examples.get_path("juvenile.shp"),k=1)
>>> "%.3f"%w1.pct_nonzero
'0.595'
>>>
Notes
-----
Supports polygon or point shapefiles. For polygon shapefiles, distance is
based on polygon centroids. Distances are defined using coordinates in
shapefile which are assumed to be projected and not geographical
coordinates.
Ties between neighbors of equal distance are arbitrarily broken.
See Also
--------
:class:`pysal.weights.W`
"""
data = get_points_array_from_shapefile(shapefile)
if radius is not None:
kdtree = cg.KDTree(data, distance_metric='Arc', radius=radius)
else:
kdtree = cg.KDTree(data)
if idVariable:
ids = get_ids(shapefile, idVariable)
return knnW(kdtree, k=k, p=p, ids=ids)
return knnW(kdtree, k=k, p=p)
def threshold_binaryW_from_array(array, threshold, p=2, radius=None):
"""
Binary weights based on a distance threshold.
Parameters
----------
array : array
(n,m)
attribute data, n observations on m attributes
threshold : float
distance band
p : float
Minkowski p-norm distance metric parameter:
1<=p<=infinity
2: Euclidean distance
1: Manhattan distance
radius : float
If supplied arc_distances will be calculated
based on the given radius. p will be ignored.
Returns
-------
w : W
instance
Weights object with binary weights
Examples
--------
>>> points=[(10, 10), (20, 10), (40, 10), (15, 20), (30, 20), (30, 30)]
>>> wcheck = pysal.W({0: [1, 3], 1: [0, 3, ], 2: [], 3: [1, 0], 4: [5], 5: [4]})
WARNING: there is one disconnected observation (no neighbors)
Island id: [2]
>>> w=threshold_binaryW_from_array(points,threshold=11.2)
WARNING: there is one disconnected observation (no neighbors)
Island id: [2]
>>> pysal.weights.util.neighbor_equality(w, wcheck)
True
>>>
"""
if radius is not None:
array = cg.KDTree(array, distance_metric='Arc', radius=radius)
return DistanceBand(array, threshold=threshold, p=p)
def threshold_binaryW_from_shapefile(shapefile, threshold, p=2, idVariable=None, radius=None):
"""
Threshold distance based binary weights from a shapefile.
Parameters
----------
shapefile : string
shapefile name with shp suffix
threshold : float
distance band
p : float
Minkowski p-norm distance metric parameter:
1<=p<=infinity
2: Euclidean distance
1: Manhattan distance
idVariable : string
name of a column in the shapefile's DBF to use for ids
radius : float
If supplied arc_distances will be calculated
based on the given radius. p will be ignored.
Returns
-------
w : W
instance
Weights object with binary weights
Examples
--------
>>> w = threshold_binaryW_from_shapefile(pysal.examples.get_path("columbus.shp"),0.62,idVariable="POLYID")
>>> w.weights[1]
[1, 1]
Notes
-----
Supports polygon or point shapefiles. For polygon shapefiles, distance is
based on polygon centroids. Distances are defined using coordinates in
shapefile which are assumed to be projected and not geographical
coordinates.
"""
data = get_points_array_from_shapefile(shapefile)
if radius is not None:
data = cg.KDTree(data, distance_metric='Arc', radius=radius)
if idVariable:
ids = get_ids(shapefile, idVariable)
w = DistanceBand(data, threshold=threshold, p=p)
w.remap_ids(ids)
return w
return threshold_binaryW_from_array(data, threshold, p=p)
def threshold_continuousW_from_array(array, threshold, p=2,
alpha=-1, radius=None):
"""
Continuous weights based on a distance threshold.
Parameters
----------
array : array
(n,m)
attribute data, n observations on m attributes
threshold : float
distance band
p : float
Minkowski p-norm distance metric parameter:
1<=p<=infinity
2: Euclidean distance
1: Manhattan distance
alpha : float
distance decay parameter for weight (default -1.0)
if alpha is positive the weights will not decline with
distance.
    radius : float
             If supplied arc_distances will be calculated
             based on the given radius. p will be ignored.
Returns
-------
w : W
instance; Weights object with continuous weights.
Examples
--------
inverse distance weights
>>> points=[(10, 10), (20, 10), (40, 10), (15, 20), (30, 20), (30, 30)]
>>> wid=threshold_continuousW_from_array(points,11.2)
WARNING: there is one disconnected observation (no neighbors)
Island id: [2]
>>> wid.weights[0]
[0.10000000000000001, 0.089442719099991588]
gravity weights
>>> wid2=threshold_continuousW_from_array(points,11.2,alpha=-2.0)
WARNING: there is one disconnected observation (no neighbors)
Island id: [2]
>>> wid2.weights[0]
[0.01, 0.0079999999999999984]
"""
if radius is not None:
array = cg.KDTree(array, distance_metric='Arc', radius=radius)
w = DistanceBand(
array, threshold=threshold, p=p, alpha=alpha, binary=False)
return w
def threshold_continuousW_from_shapefile(shapefile, threshold, p=2,
alpha=-1, idVariable=None, radius=None):
"""
Threshold distance based continuous weights from a shapefile.
Parameters
----------
shapefile : string
shapefile name with shp suffix
threshold : float
distance band
p : float
Minkowski p-norm distance metric parameter:
1<=p<=infinity
2: Euclidean distance
1: Manhattan distance
alpha : float
distance decay parameter for weight (default -1.0)
if alpha is positive the weights will not decline with
distance.
idVariable : string
name of a column in the shapefile's DBF to use for ids
radius : float
If supplied arc_distances will be calculated
based on the given radius. p will be ignored.
Returns
-------
w : W
instance; Weights object with continuous weights.
Examples
--------
>>> w = threshold_continuousW_from_shapefile(pysal.examples.get_path("columbus.shp"),0.62,idVariable="POLYID")
>>> w.weights[1]
[1.6702346893743334, 1.7250729841938093]
Notes
-----
Supports polygon or point shapefiles. For polygon shapefiles, distance is
based on polygon centroids. Distances are defined using coordinates in
shapefile which are assumed to be projected and not geographical
coordinates.
"""
data = get_points_array_from_shapefile(shapefile)
if radius is not None:
data = cg.KDTree(data, distance_metric='Arc', radius=radius)
if idVariable:
ids = get_ids(shapefile, idVariable)
w = DistanceBand(data, threshold=threshold, p=p, alpha=alpha, binary=False)
w.remap_ids(ids)
else:
w = threshold_continuousW_from_array(data, threshold, p=p, alpha=alpha)
w.set_shapefile(shapefile,idVariable)
return w
# Kernel Weights
def kernelW(points, k=2, function='triangular', fixed=True,
radius=None, diagonal=False):
"""
Kernel based weights.
Parameters
----------
points : array
(n,k)
n observations on k characteristics used to measure
distances between the n objects
k : int
the number of nearest neighbors to use for determining
bandwidth. Bandwidth taken as :math:`h_i=max(dknn) \\forall i`
where :math:`dknn` is a vector of k-nearest neighbor
distances (the distance to the kth nearest neighbor for each
observation).
function : {'triangular','uniform','quadratic','epanechnikov','quartic','bisquare','gaussian'}
.. math::
z_{i,j} = d_{i,j}/h_i
triangular
.. math::
K(z) = (1 - |z|) \ if |z| \le 1
uniform
.. math::
K(z) = |z| \ if |z| \le 1
quadratic
.. math::
K(z) = (3/4)(1-z^2) \ if |z| \le 1
epanechnikov
.. math::
K(z) = (1-z^2) \ if |z| \le 1
quartic
.. math::
K(z) = (15/16)(1-z^2)^2 \ if |z| \le 1
bisquare
.. math::
K(z) = (1-z^2)^2 \ if |z| \le 1
gaussian
.. math::
K(z) = (2\pi)^{(-1/2)} exp(-z^2 / 2)
fixed : boolean
If true then :math:`h_i=h \\forall i`. If false then
bandwidth is adaptive across observations.
radius : float
If supplied arc_distances will be calculated
based on the given radius. p will be ignored.
diagonal : boolean
If true, set diagonal weights = 1.0, if false (
default) diagonal weights are set to value
according to kernel function.
Returns
-------
w : W
instance of spatial weights
Examples
--------
>>> points=[(10, 10), (20, 10), (40, 10), (15, 20), (30, 20), (30, 30)]
>>> kw=kernelW(points)
>>> kw.weights[0]
[1.0, 0.500000049999995, 0.4409830615267465]
>>> kw.neighbors[0]
[0, 1, 3]
>>> kw.bandwidth
array([[ 20.000002],
[ 20.000002],
[ 20.000002],
[ 20.000002],
[ 20.000002],
[ 20.000002]])
use different k
>>> kw=kernelW(points,k=3)
>>> kw.neighbors[0]
[0, 1, 3, 4]
>>> kw.bandwidth
array([[ 22.36068201],
[ 22.36068201],
[ 22.36068201],
[ 22.36068201],
[ 22.36068201],
[ 22.36068201]])
Diagonals to 1.0
>>> kq = kernelW(points,function='gaussian')
>>> kq.weights
{0: [0.3989422804014327, 0.35206533556593145, 0.3412334260702758], 1: [0.35206533556593145, 0.3989422804014327, 0.2419707487162134, 0.3412334260702758, 0.31069657591175387], 2: [0.2419707487162134, 0.3989422804014327, 0.31069657591175387], 3: [0.3412334260702758, 0.3412334260702758, 0.3989422804014327, 0.3011374490937829, 0.26575287272131043], 4: [0.31069657591175387, 0.31069657591175387, 0.3011374490937829, 0.3989422804014327, 0.35206533556593145], 5: [0.26575287272131043, 0.35206533556593145, 0.3989422804014327]}
>>> kqd = kernelW(points, function='gaussian', diagonal=True)
>>> kqd.weights
{0: [1.0, 0.35206533556593145, 0.3412334260702758], 1: [0.35206533556593145, 1.0, 0.2419707487162134, 0.3412334260702758, 0.31069657591175387], 2: [0.2419707487162134, 1.0, 0.31069657591175387], 3: [0.3412334260702758, 0.3412334260702758, 1.0, 0.3011374490937829, 0.26575287272131043], 4: [0.31069657591175387, 0.31069657591175387, 0.3011374490937829, 1.0, 0.35206533556593145], 5: [0.26575287272131043, 0.35206533556593145, 1.0]}
"""
if radius is not None:
points = cg.KDTree(points, distance_metric='Arc', radius=radius)
return Kernel(points, function=function, k=k, fixed=fixed,
diagonal=diagonal)
def kernelW_from_shapefile(shapefile, k=2, function='triangular',
idVariable=None, fixed=True, radius=None, diagonal=False):
"""
Kernel based weights.
Parameters
----------
shapefile : string
shapefile name with shp suffix
k : int
the number of nearest neighbors to use for determining
bandwidth. Bandwidth taken as :math:`h_i=max(dknn) \\forall i`
where :math:`dknn` is a vector of k-nearest neighbor
distances (the distance to the kth nearest neighbor for each
observation).
function : {'triangular','uniform','quadratic','epanechnikov', 'quartic','bisquare','gaussian'}
.. math::
z_{i,j} = d_{i,j}/h_i
triangular
.. math::
K(z) = (1 - |z|) \ if |z| \le 1
uniform
.. math::
K(z) = |z| \ if |z| \le 1
quadratic
.. math::
K(z) = (3/4)(1-z^2) \ if |z| \le 1
epanechnikov
.. math::
K(z) = (1-z^2) \ if |z| \le 1
quartic
.. math::
K(z) = (15/16)(1-z^2)^2 \ if |z| \le 1
bisquare
.. math::
K(z) = (1-z^2)^2 \ if |z| \le 1
gaussian
.. math::
K(z) = (2\pi)^{(-1/2)} exp(-z^2 / 2)
idVariable : string
name of a column in the shapefile's DBF to use for ids
    fixed : boolean
If true then :math:`h_i=h \\forall i`. If false then
bandwidth is adaptive across observations.
radius : float
If supplied arc_distances will be calculated
based on the given radius. p will be ignored.
diagonal : boolean
If true, set diagonal weights = 1.0, if false (
default) diagonal weights are set to value
according to kernel function.
Returns
-------
w : W
instance of spatial weights
Examples
--------
>>> kw = pysal.kernelW_from_shapefile(pysal.examples.get_path("columbus.shp"),idVariable='POLYID', function = 'gaussian')
>>> kwd = pysal.kernelW_from_shapefile(pysal.examples.get_path("columbus.shp"),idVariable='POLYID', function = 'gaussian', diagonal = True)
>>> set(kw.neighbors[1]) == set([4, 2, 3, 1])
True
>>> set(kwd.neighbors[1]) == set([4, 2, 3, 1])
True
>>>
>>> set(kw.weights[1]) == set( [0.2436835517263174, 0.29090631630909874, 0.29671172124745776, 0.3989422804014327])
True
>>> set(kwd.weights[1]) == set( [0.2436835517263174, 0.29090631630909874, 0.29671172124745776, 1.0])
True
Notes
-----
Supports polygon or point shapefiles. For polygon shapefiles, distance is
based on polygon centroids. Distances are defined using coordinates in
shapefile which are assumed to be projected and not geographical
coordinates.
"""
points = get_points_array_from_shapefile(shapefile)
if radius is not None:
points = cg.KDTree(points, distance_metric='Arc', radius=radius)
if idVariable:
ids = get_ids(shapefile, idVariable)
return Kernel(points, function=function, k=k, ids=ids, fixed=fixed,
diagonal = diagonal)
return kernelW(points, k=k, function=function, fixed=fixed,
diagonal=diagonal)
def adaptive_kernelW(points, bandwidths=None, k=2, function='triangular',
radius=None, diagonal=False):
"""
Kernel weights with adaptive bandwidths.
Parameters
----------
points : array
(n,k)
n observations on k characteristics used to measure
distances between the n objects
bandwidths : float
or array-like (optional)
the bandwidth :math:`h_i` for the kernel.
if no bandwidth is specified k is used to determine the
adaptive bandwidth
k : int
the number of nearest neighbors to use for determining
bandwidth. For fixed bandwidth, :math:`h_i=max(dknn) \\forall i`
where :math:`dknn` is a vector of k-nearest neighbor
distances (the distance to the kth nearest neighbor for each
observation). For adaptive bandwidths, :math:`h_i=dknn_i`
function : {'triangular','uniform','quadratic','quartic','gaussian'}
kernel function defined as follows with
.. math::
z_{i,j} = d_{i,j}/h_i
triangular
.. math::
K(z) = (1 - |z|) \ if |z| \le 1
uniform
.. math::
K(z) = |z| \ if |z| \le 1
quadratic
.. math::
K(z) = (3/4)(1-z^2) \ if |z| \le 1
quartic
.. math::
K(z) = (15/16)(1-z^2)^2 \ if |z| \le 1
gaussian
.. math::
K(z) = (2\pi)^{(-1/2)} exp(-z^2 / 2)
radius : float
If supplied arc_distances will be calculated
based on the given radius. p will be ignored.
diagonal : boolean
If true, set diagonal weights = 1.0, if false (
default) diagonal weights are set to value
according to kernel function.
Returns
-------
w : W
instance of spatial weights
Examples
--------
User specified bandwidths
>>> points=[(10, 10), (20, 10), (40, 10), (15, 20), (30, 20), (30, 30)]
>>> bw=[25.0,15.0,25.0,16.0,14.5,25.0]
>>> kwa=adaptive_kernelW(points,bandwidths=bw)
>>> kwa.weights[0]
[1.0, 0.6, 0.552786404500042, 0.10557280900008403]
>>> kwa.neighbors[0]
[0, 1, 3, 4]
>>> kwa.bandwidth
array([[ 25. ],
[ 15. ],
[ 25. ],
[ 16. ],
[ 14.5],
[ 25. ]])
Endogenous adaptive bandwidths
>>> kwea=adaptive_kernelW(points)
>>> kwea.weights[0]
[1.0, 0.10557289844279438, 9.99999900663795e-08]
>>> kwea.neighbors[0]
[0, 1, 3]
>>> kwea.bandwidth
array([[ 11.18034101],
[ 11.18034101],
[ 20.000002 ],
[ 11.18034101],
[ 14.14213704],
[ 18.02775818]])
Endogenous adaptive bandwidths with Gaussian kernel
>>> kweag=adaptive_kernelW(points,function='gaussian')
>>> kweag.weights[0]
[0.3989422804014327, 0.2674190291577696, 0.2419707487162134]
>>> kweag.bandwidth
array([[ 11.18034101],
[ 11.18034101],
[ 20.000002 ],
[ 11.18034101],
[ 14.14213704],
[ 18.02775818]])
with diagonal
>>> kweag = pysal.adaptive_kernelW(points, function='gaussian')
>>> kweagd = pysal.adaptive_kernelW(points, function='gaussian', diagonal=True)
>>> kweag.neighbors[0]
[0, 1, 3]
>>> kweagd.neighbors[0]
[0, 1, 3]
>>> kweag.weights[0]
[0.3989422804014327, 0.2674190291577696, 0.2419707487162134]
>>> kweagd.weights[0]
[1.0, 0.2674190291577696, 0.2419707487162134]
"""
if radius is not None:
points = cg.KDTree(points, distance_metric='Arc', radius=radius)
return Kernel(points, bandwidth=bandwidths, fixed=False, k=k,
function=function, diagonal=diagonal)
def adaptive_kernelW_from_shapefile(shapefile, bandwidths=None, k=2, function='triangular',
idVariable=None, radius=None,
diagonal = False):
"""
Kernel weights with adaptive bandwidths.
Parameters
----------
shapefile : string
shapefile name with shp suffix
bandwidths : float
or array-like (optional)
the bandwidth :math:`h_i` for the kernel.
if no bandwidth is specified k is used to determine the
adaptive bandwidth
k : int
the number of nearest neighbors to use for determining
bandwidth. For fixed bandwidth, :math:`h_i=max(dknn) \\forall i`
where :math:`dknn` is a vector of k-nearest neighbor
distances (the distance to the kth nearest neighbor for each
observation). For adaptive bandwidths, :math:`h_i=dknn_i`
function : {'triangular','uniform','quadratic','quartic','gaussian'}
kernel function defined as follows with
.. math::
z_{i,j} = d_{i,j}/h_i
triangular
.. math::
K(z) = (1 - |z|) \ if |z| \le 1
uniform
.. math::
K(z) = |z| \ if |z| \le 1
quadratic
.. math::
K(z) = (3/4)(1-z^2) \ if |z| \le 1
quartic
.. math::
K(z) = (15/16)(1-z^2)^2 \ if |z| \le 1
gaussian
.. math::
K(z) = (2\pi)^{(-1/2)} exp(-z^2 / 2)
idVariable : string
name of a column in the shapefile's DBF to use for ids
radius : float
If supplied arc_distances will be calculated
based on the given radius. p will be ignored.
diagonal : boolean
If true, set diagonal weights = 1.0, if false (
default) diagonal weights are set to value
according to kernel function.
Returns
-------
w : W
instance of spatial weights
Examples
--------
>>> kwa = pysal.adaptive_kernelW_from_shapefile(pysal.examples.get_path("columbus.shp"), function='gaussian')
>>> kwad = pysal.adaptive_kernelW_from_shapefile(pysal.examples.get_path("columbus.shp"), function='gaussian', diagonal=True)
>>> kwa.neighbors[0]
[0, 2, 1]
>>> kwad.neighbors[0]
[0, 2, 1]
>>> kwa.weights[0]
[0.3989422804014327, 0.24966013701844503, 0.2419707487162134]
>>> kwad.weights[0]
[1.0, 0.24966013701844503, 0.2419707487162134]
Notes
-----
Supports polygon or point shapefiles. For polygon shapefiles, distance is
based on polygon centroids. Distances are defined using coordinates in
shapefile which are assumed to be projected and not geographical
coordinates.
"""
points = get_points_array_from_shapefile(shapefile)
if radius is not None:
points = cg.KDTree(points, distance_metric='Arc', radius=radius)
if idVariable:
ids = get_ids(shapefile, idVariable)
return Kernel(points, bandwidth=bandwidths, fixed=False, k=k,
function=function, ids=ids, diagonal=diagonal)
return adaptive_kernelW(points, bandwidths=bandwidths, k=k,
function=function, diagonal=diagonal)
def min_threshold_dist_from_shapefile(shapefile, radius=None, p=2):
"""
    Get the maximum nearest neighbor distance between observations in the
    shapefile.
Parameters
----------
shapefile : string
shapefile name with shp suffix.
radius : float
If supplied arc_distances will be calculated
based on the given radius. p will be ignored.
p : float
Minkowski p-norm distance metric parameter:
1<=p<=infinity
2: Euclidean distance
1: Manhattan distance
Returns
-------
d : float
Maximum nearest neighbor distance between the n
observations.
Examples
--------
>>> md = min_threshold_dist_from_shapefile(pysal.examples.get_path("columbus.shp"))
>>> md
0.61886415807685413
>>> min_threshold_dist_from_shapefile(pysal.examples.get_path("stl_hom.shp"), pysal.cg.sphere.RADIUS_EARTH_MILES)
31.846942936393717
Notes
-----
Supports polygon or point shapefiles. For polygon shapefiles, distance is
based on polygon centroids. Distances are defined using coordinates in
shapefile which are assumed to be projected and not geographical
coordinates.
"""
points = get_points_array_from_shapefile(shapefile)
if radius is not None:
kdt = cg.kdtree.Arc_KDTree(points, radius=radius)
nn = kdt.query(kdt.data, k=2)
nnd = nn[0].max(axis=0)[1]
return nnd
return min_threshold_distance(points, p)
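# Common follow-up, shown as comments only ("points.shp" is a placeholder
# name): the returned distance is the smallest threshold that leaves no
# observation without a neighbor, so it is usually passed straight to a
# distance-band constructor:
#
#     thresh = min_threshold_dist_from_shapefile("points.shp")
#     w = threshold_binaryW_from_shapefile("points.shp", thresh)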
def build_lattice_shapefile(nrows, ncols, outFileName):
"""
Build a lattice shapefile with nrows rows and ncols cols.
Parameters
----------
nrows : int
Number of rows
ncols : int
Number of cols
outFileName : str
shapefile name with shp suffix
Returns
-------
None
"""
if not outFileName.endswith('.shp'):
raise ValueError("outFileName must end with .shp")
o = ps_open(outFileName, 'w')
dbf_name = outFileName.split(".")[0] + ".dbf"
d = ps_open(dbf_name, 'w')
d.header = [ 'ID' ]
d.field_spec = [ ('N', 8, 0) ]
c = 0
for i in xrange(nrows):
for j in xrange(ncols):
ll = i, j
ul = i, j + 1
ur = i + 1, j + 1
lr = i + 1, j
o.write(cg.Polygon([ll, ul, ur, lr, ll]))
d.write([c])
c += 1
d.close()
o.close()
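# Typical pairing, sketched as comments (the output name is a placeholder):
# a freshly written lattice can be fed back into the contiguity builders
# defined above:
#
#     build_lattice_shapefile(5, 5, "lattice5x5.shp")
#     w = rook_from_shapefile("lattice5x5.shp")    # 25 cells, rook contiguity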
def _test():
import doctest
# the following line could be used to define an alternative to the '<BLANKLINE>' flag
#doctest.BLANKLINE_MARKER = 'something better than <BLANKLINE>'
start_suppress = np.get_printoptions()['suppress']
np.set_printoptions(suppress=True)
doctest.testmod()
np.set_printoptions(suppress=start_suppress)
if __name__ == '__main__':
_test()
| {
"content_hash": "76dea52a14fd3e9ecc4f5bca2fc5ad46",
"timestamp": "",
"source": "github",
"line_count": 1141,
"max_line_length": 524,
"avg_line_length": 29.879929886064854,
"alnum_prop": 0.5466518053559382,
"repo_name": "ljwolf/pysal_core",
"id": "21144d696660f4f475e89efc296710b43da721be",
"size": "34093",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "libpysal/weights/user.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "HTML",
"bytes": "8941"
},
{
"name": "Python",
"bytes": "828668"
},
{
"name": "Shell",
"bytes": "186"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import, unicode_literals
from .comment_handler import CommentHandler
from .error_handler import Error
from .jsx_parser import JSXParser
from .jsx_syntax import JSXSyntax
from .objects import toDict
from .parser import Parser
from .syntax import Syntax
from .tokenizer import Tokenizer
from .visitor import NodeVisitor
from . import nodes
from . import jsx_nodes
__all__ = ['Syntax', 'JSXSyntax', 'Error', 'NodeVisitor', 'nodes', 'jsx_nodes',
'parse', 'parseModule', 'parseScript', 'tokenize', 'toDict']
def parse(code, options=None, delegate=None, **kwargs):
options = {} if options is None else options.copy()
options.update(kwargs)
    # ESNext preset:
if options.get('esnext', False):
options['jsx'] = True
options['classProperties'] = True
commentHandler = None
def proxyDelegate(node, metadata):
if delegate:
new_node = delegate(node, metadata)
if new_node is not None:
node = new_node
if commentHandler:
commentHandler.visit(node, metadata)
return node
parserDelegate = None if delegate is None else proxyDelegate
collectComment = options.get('comment', False)
attachComment = options.get('attachComment', False)
if collectComment or attachComment:
commentHandler = CommentHandler()
commentHandler.attach = attachComment
options['comment'] = True
parserDelegate = proxyDelegate
isModule = options.get('sourceType', 'script') == 'module'
if options.get('jsx', False):
parser = JSXParser(code, options=options, delegate=parserDelegate)
else:
parser = Parser(code, options=options, delegate=parserDelegate)
ast = parser.parseModule() if isModule else parser.parseScript()
if collectComment and commentHandler:
ast.comments = commentHandler.comments
if parser.config.tokens:
ast.tokens = parser.tokens
if parser.config.tolerant:
ast.errors = parser.errorHandler.errors
return ast
def parseModule(code, options=None, delegate=None, **kwargs):
kwargs['sourceType'] = 'module'
return parse(code, options, delegate, **kwargs)
def parseScript(code, options=None, delegate=None, **kwargs):
kwargs['sourceType'] = 'script'
return parse(code, options, delegate, **kwargs)
def tokenize(code, options=None, delegate=None, **kwargs):
options = {} if options is None else options.copy()
options.update(kwargs)
tokenizer = Tokenizer(code, options)
class Tokens(list):
pass
tokens = Tokens()
try:
while True:
token = tokenizer.getNextToken()
if not token:
break
if delegate:
token = delegate(token)
tokens.append(token)
except Error as e:
tokenizer.errorHandler.tolerate(e)
if tokenizer.errorHandler.tolerant:
tokens.errors = tokenizer.errors()
return tokens
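# Rough usage sketch, comments only (the JavaScript snippet is arbitrary): a
# typical round trip parses a script, serialises the AST with toDict, and
# inspects the raw token stream:
#
#     ast = parseScript("const answer = 42;", tolerant=True)
#     as_dict = toDict(ast)
#     tokens = tokenize("const answer = 42;")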
| {
"content_hash": "b74933d5eec0e8b7c265a13599f2b500",
"timestamp": "",
"source": "github",
"line_count": 105,
"max_line_length": 79,
"avg_line_length": 28.714285714285715,
"alnum_prop": 0.6606965174129353,
"repo_name": "mp-coder/translate-dev-tools",
"id": "b22455100e6f8be070ff584e5f4c1e4d71b421db",
"size": "4382",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "esprima/esprima.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "278126"
}
],
"symlink_target": ""
} |
from tkinter import Tk, Text, BOTH, DISABLED, NORMAL, END
APPNAME = "WRITER TEST"
WELCOME_MSG = "Welcome to {}\n".format(APPNAME)
class Output(Text):
def __init__(self, parent):
Text.__init__(self, parent)
self.parent = parent
self.initUI()
def initUI(self):
sw = self.parent.winfo_screenwidth()
sh = self.parent.winfo_screenheight()
self.parent.title("{}".format(APPNAME))
self.parent.geometry('{}x{}'.format(sw,sh))
self.parent.bind('<KeyPress>', self.onKeyPress)
self.pack(fill=BOTH, expand=1)
self.config(background='black', foreground='white', font=('Courier', 12))
self.insert('end', '{}\n'.format(WELCOME_MSG))
self.config(state=DISABLED)
def onKeyPress(self, event):
self.config(state=NORMAL)
self.delete(1.0, END)
self.insert('end', 'You pressed: {}'.format(event.keycode))
self.config(state=DISABLED)
class UI():
def __init__(self):
self.root = Tk()
self.app = Output(self.root)
self.root.mainloop()
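# Creating UI() below builds the full-screen Output widget and immediately
# enters the Tk main loop; every key press replaces the text with the pressed
# keycode until the window is closed.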
ui = UI()
| {
"content_hash": "fb472fbe5dd0dfd520d452fde24ba981",
"timestamp": "",
"source": "github",
"line_count": 39,
"max_line_length": 81,
"avg_line_length": 29.076923076923077,
"alnum_prop": 0.5758377425044092,
"repo_name": "daviaws/missionText",
"id": "020786b949349a53cf4db9ee348166f29a7eb6d5",
"size": "1177",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "get_key_code/UI.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "37167"
}
],
"symlink_target": ""
} |
from collections.abc import Awaitable
from inspect import CO_ITERABLE_COROUTINE
from types import CoroutineType
from types import GeneratorType
async def do_await(obj):
return await obj
def do_yield_from(gen):
return (yield from gen)
def await_(obj):
obj_type = type(obj)
if (
obj_type is CoroutineType
or obj_type is GeneratorType
and bool(obj.gi_code.co_flags & CO_ITERABLE_COROUTINE)
or isinstance(obj, Awaitable)
):
return do_await(obj).__await__()
else:
return do_yield_from(obj)
def __aiter__(self):
return self.__wrapped__.__aiter__()
async def __anext__(self):
return await self.__wrapped__.__anext__()
def __await__(self):
return await_(self.__wrapped__)
def __aenter__(self):
return self.__wrapped__.__aenter__()
def __aexit__(self, *args, **kwargs):
return self.__wrapped__.__aexit__(*args, **kwargs)
def identity(obj):
return obj
class cached_property(object):
def __init__(self, func):
self.func = func
def __get__(self, obj, cls):
if obj is None:
return self
value = obj.__dict__[self.func.__name__] = self.func(obj)
return value
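# Usage sketch, comments only (Resource and expensive_connect are made-up
# names): the descriptor computes the value once and then shadows itself with
# a plain instance attribute, so later accesses skip the function entirely:
#
#     class Resource(object):
#         @cached_property
#         def connection(self):
#             return expensive_connect()   # runs only on first access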
| {
"content_hash": "f0553b03eb96b877a13b76f7522f8338",
"timestamp": "",
"source": "github",
"line_count": 60,
"max_line_length": 65,
"avg_line_length": 20.216666666666665,
"alnum_prop": 0.606760098928277,
"repo_name": "ionelmc/python-lazy-object-proxy",
"id": "99945f460b2efaaa9cc35c1a3a9612a31f30a41d",
"size": "1213",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/lazy_object_proxy/utils.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "C",
"bytes": "38137"
},
{
"name": "Python",
"bytes": "113997"
}
],
"symlink_target": ""
} |
import zookeeper, time, threading
f = open("out.log","w")
zookeeper.set_log_stream(f)
connected = False
conn_cv = threading.Condition( )
def my_connection_watcher(handle,type,state,path):
global connected, conn_cv
print("Connected, handle is ", handle)
conn_cv.acquire()
connected = True
conn_cv.notifyAll()
conn_cv.release()
conn_cv.acquire()
print("Connecting to localhost:2181 -- ")
handle = zookeeper.init("localhost:2181", my_connection_watcher, 10000, 0)
while not connected:
conn_cv.wait()
conn_cv.release()
def my_getc_watch( handle, type, state, path ):
print("Watch fired -- ")
print(type, state, path)
ZOO_OPEN_ACL_UNSAFE = {"perms":0x1f, "scheme":"world", "id" :"anyone"};
try:
zookeeper.create(handle, "/zk-python", "data", [ZOO_OPEN_ACL_UNSAFE], 0)
zookeeper.get_children(handle, "/zk-python", my_getc_watch)
    for i in range(5):
print("Creating sequence node ", i, " ", zookeeper.create(handle, "/zk-python/sequencenode", "data", [ZOO_OPEN_ACL_UNSAFE], zookeeper.SEQUENCE ))
except:
pass
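# The bare except above lets the demo be re-run against the same server: the
# create() call fails once /zk-python is left over from a previous run, and
# that error is simply ignored here.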
def pp_zk(handle,root, indent = 0):
"""Pretty print(a zookeeper tree, starting at root)"""
def make_path(child):
if root == "/":
return "/" + child
return root + "/" + child
children = zookeeper.get_children(handle, root, None)
out = ""
    for i in range(indent):
out += "\t"
out += "|---"+root + " :: " + zookeeper.get(handle, root, None)[0]
print(out)
for child in children:
pp_zk(handle,make_path(child),indent+1)
print("ZNode tree -- ")
pp_zk(handle,"/")
print("Getting ACL / Stat for /zk-python --")
(stat, acl) = zookeeper.get_acl(handle, "/zk-python")
print("Stat:: ", stat)
print("Acl:: ", acl)
| {
"content_hash": "557e9bd65b9af735c03548d0fa79ef89",
"timestamp": "",
"source": "github",
"line_count": 60,
"max_line_length": 153,
"avg_line_length": 29.25,
"alnum_prop": 0.6273504273504273,
"repo_name": "apache/zookeeper",
"id": "24986e3aac8a55d9f996ea07e082ef7bc4aba831",
"size": "2539",
"binary": false,
"copies": "11",
"ref": "refs/heads/master",
"path": "zookeeper-contrib/zookeeper-contrib-zkpython/src/python/zk.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "10133"
},
{
"name": "C",
"bytes": "627863"
},
{
"name": "C++",
"bytes": "708872"
},
{
"name": "CMake",
"bytes": "10533"
},
{
"name": "CSS",
"bytes": "22915"
},
{
"name": "Dockerfile",
"bytes": "1030"
},
{
"name": "HTML",
"bytes": "43586"
},
{
"name": "Java",
"bytes": "6889310"
},
{
"name": "JavaScript",
"bytes": "246638"
},
{
"name": "M4",
"bytes": "52000"
},
{
"name": "Makefile",
"bytes": "11657"
},
{
"name": "Mako",
"bytes": "13704"
},
{
"name": "Perl",
"bytes": "87261"
},
{
"name": "Python",
"bytes": "163305"
},
{
"name": "Raku",
"bytes": "67310"
},
{
"name": "Shell",
"bytes": "116200"
},
{
"name": "XS",
"bytes": "68654"
},
{
"name": "XSLT",
"bytes": "6024"
}
],
"symlink_target": ""
} |
import datetime
from Models.Submission import Submission
from Core.Database import Database
from Core.Scorer import Scorer
from Core.Executer import Executer
from Core.Parser import Parser
class Grader():
def __init__(self):
self._session = Database.session()
def run(self, anonymous_student_id, student_response, problem_id):
submission = self._save_submission(anonymous_student_id, student_response, problem_id)
if submission.error:
return Grader._response(False)
fail_messages = {}
submissions = Submission.get_last_submissions_each_user(submission.problem_id)
for s in submissions:
messages = self._grader_execute(submission, s)
if messages:
fail_messages[s.student_id] = messages
if not s.id == submission.id:
self._grader_execute(s, submission)
return Grader._response(fail_messages=fail_messages)
def _grader_execute(self, submission_program, submission_test):
test_result, fail_messages = Executer.run_test(submission_program, submission_test)
self._session.add(test_result)
self._session.commit()
Scorer(submission_program.student_id, submission_test.student_id, test_result).start()
return fail_messages
def _save_submission(self, anonymous_student_id, student_response, problem_id):
program, test = Parser.parse(student_response)
new_submission = Submission(datetime.datetime.now(), anonymous_student_id, problem_id, program, test, False)
test_result, fail_messages = Executer.run_test(new_submission, new_submission)
new_submission.error = True if test_result.errors > 0 else False
submission_exists = Submission.get_submission_user(new_submission.student_id, problem_id)
if submission_exists:
Scorer.resubmission_score(new_submission.student_id, -100)
Scorer(None, None, None).get_score(new_submission.student_id)
self._session.add(new_submission)
self._session.commit()
self._session.expunge(new_submission)
self._session.close()
return new_submission
@staticmethod
def _response(correct=True, fail_messages=None):
if not correct:
title = "<h3 style='color:red'><strong>Erro encontrado no Código.</strong></h3>"
msg = "<p>Execute localmente em sua máquina os testes do seu programa antes de submetê-lo.</p>"
else:
title = "<h3><strong>Submissão aceita e pontuada.</strong></h3>"
if fail_messages:
if len(fail_messages) > 1:
msg = "<p>Os casos de testes de {} alunos encontraram falhas no seu programa.</p>".format(len(fail_messages))
else:
msg = "<p>Os casos de testes de 1 aluno encontrou falhas no seu programa.</p>"
fail_msg = "<pre style='color:red;'>{}</pre>".format(list(fail_messages.values())[0][0])
msg = "{}<p><strong>Mensagem de falha:</strong></p>{}".format(msg, fail_msg)
else:
msg = "<p>Não foram encontradas falhas no seu programa por outros alunos.</p>"
return {"correct": correct, "score": 1, "msg": "{}\n{}".format(title, msg)}
| {
"content_hash": "1c469cd97eb342c8d647749a7e495665",
"timestamp": "",
"source": "github",
"line_count": 75,
"max_line_length": 129,
"avg_line_length": 43.84,
"alnum_prop": 0.6386861313868614,
"repo_name": "brnomendes/grader-edx",
"id": "92a5c4abb10045ba60521150fcb257f838c2d9c5",
"size": "3293",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Core/Grader.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "972"
},
{
"name": "Python",
"bytes": "15921"
},
{
"name": "Shell",
"bytes": "55"
}
],
"symlink_target": ""
} |
from swgpy.object import *
def create(kernel):
result = Intangible()
result.template = "object/draft_schematic/item/component/shared_item_electronic_power_conditioner.iff"
result.attribute_template_id = -1
result.stfName("string_id_table","")
#### BEGIN MODIFICATIONS ####
#### END MODIFICATIONS ####
	return result
| {
"content_hash": "c6436500edaa65867bc530fe43340bf1",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 103,
"avg_line_length": 25.53846153846154,
"alnum_prop": 0.7108433734939759,
"repo_name": "obi-two/Rebelion",
"id": "e4365e3821cb2c6c7233ed40d76c66849030e4ce",
"size": "477",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "data/scripts/templates/object/draft_schematic/item/component/shared_item_electronic_power_conditioner.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "11818"
},
{
"name": "C",
"bytes": "7699"
},
{
"name": "C++",
"bytes": "2293610"
},
{
"name": "CMake",
"bytes": "39727"
},
{
"name": "PLSQL",
"bytes": "42065"
},
{
"name": "Python",
"bytes": "7499185"
},
{
"name": "SQLPL",
"bytes": "41864"
}
],
"symlink_target": ""
} |