from __future__ import absolute_import
from django.db import IntegrityError, transaction
from rest_framework import serializers, status
from rest_framework.response import Response
from sentry.api.base import DocSection
from sentry.api.bases.team import TeamEndpoint, TeamPermission
from sentry.api.serializers import serialize
from sentry.models import Project, ProjectStatus, AuditLogEntryEvent
from sentry.signals import project_created
from sentry.utils.apidocs import scenario, attach_scenarios
from sentry.utils.samples import create_sample_event
@scenario('ListTeamProjects')
def list_team_projects_scenario(runner):
runner.request(
method='GET',
path='/teams/%s/%s/projects/' % (
runner.org.slug, runner.default_team.slug)
)
@scenario('CreateNewProject')
def create_project_scenario(runner):
runner.request(
method='POST',
path='/teams/%s/%s/projects/' % (
runner.org.slug, runner.default_team.slug),
data={
'name': 'The Spoiled Yoghurt'
}
)
class ProjectSerializer(serializers.Serializer):
name = serializers.CharField(max_length=64, required=True)
slug = serializers.RegexField(r'^[a-z0-9_\-]+$', max_length=50,
required=False)
# While the UI currently suggests that a team is the parent of a project, in
# reality the project is the core component and the team it belongs to is just
# an attribute. Because a project's team can already be changed by mutating the
# project, and because Sentry intends to remove teams as a hierarchy item, we
# allow you to view a team's projects, as well as create a new project, as long
# as you are a member of that team and have project-scoped permissions.
class TeamProjectPermission(TeamPermission):
scope_map = {
'GET': ['project:read', 'project:write', 'project:delete'],
'POST': ['project:write', 'project:delete'],
'PUT': ['project:write', 'project:delete'],
'DELETE': ['project:delete'],
}
class TeamProjectIndexEndpoint(TeamEndpoint):
doc_section = DocSection.TEAMS
permission_classes = (TeamProjectPermission,)
@attach_scenarios([list_team_projects_scenario])
def get(self, request, team):
"""
List a Team's Projects
``````````````````````
Return a list of projects bound to a team.
:pparam string organization_slug: the slug of the organization the
team belongs to.
:pparam string team_slug: the slug of the team to list the projects of.
:auth: required
"""
if request.user.is_authenticated():
results = list(Project.objects.get_for_user(
team=team, user=request.user))
else:
# TODO(dcramer): status should be selectable
results = list(Project.objects.filter(
team=team,
status=ProjectStatus.VISIBLE,
))
return Response(serialize(results, request.user))
@attach_scenarios([create_project_scenario])
def post(self, request, team):
"""
Create a New Project
````````````````````
Create a new project bound to a team.
:pparam string organization_slug: the slug of the organization the
team belongs to.
:pparam string team_slug: the slug of the team to create a new project
for.
:param string name: the name for the new project.
:param string slug: optionally, a slug for the new project. If it's
not provided, a slug is generated from the name.
:auth: required
"""
serializer = ProjectSerializer(data=request.DATA)
if serializer.is_valid():
result = serializer.object
try:
with transaction.atomic():
project = Project.objects.create(
name=result['name'],
slug=result.get('slug'),
organization=team.organization,
team=team
)
except IntegrityError:
return Response(
{'detail': 'A project with this slug already exists.'},
status=409,
)
# XXX: create sample event?
self.create_audit_entry(
request=request,
organization=team.organization,
target_object=project.id,
event=AuditLogEntryEvent.PROJECT_ADD,
data=project.get_audit_log_data(),
)
project_created.send(project=project, user=request.user, sender=self)
create_sample_event(project, platform='javascript')
return Response(serialize(project, request.user), status=201)
return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
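# A minimal client-side sketch of calling the endpoint above, assuming a
# hypothetical host, organization slug, team slug and auth token; the route
# shape follows the scenarios defined at the top of this file, and the status
# codes mirror the handler (201 created, 409 duplicate slug, 400 validation).
import requests

def create_project_example():
    resp = requests.post(
        'https://sentry.example.com/api/0/teams/my-org/my-team/projects/',
        headers={'Authorization': 'Bearer <auth-token>'},
        json={'name': 'The Spoiled Yoghurt'},
    )
    resp.raise_for_status()
    return resp.json()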
| {
"content_hash": "7adf7e1322a163a3368c9b638494f469",
"timestamp": "",
"source": "github",
"line_count": 138,
"max_line_length": 81,
"avg_line_length": 36.30434782608695,
"alnum_prop": 0.6025948103792416,
"repo_name": "zenefits/sentry",
"id": "b707d4be64b477dcb7900722f5881a325060d2a1",
"size": "5010",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "src/sentry/api/endpoints/team_project_index.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "249557"
},
{
"name": "HTML",
"bytes": "293019"
},
{
"name": "JavaScript",
"bytes": "975797"
},
{
"name": "Lua",
"bytes": "22367"
},
{
"name": "Makefile",
"bytes": "5959"
},
{
"name": "Python",
"bytes": "12550461"
},
{
"name": "Ruby",
"bytes": "4026"
},
{
"name": "Shell",
"bytes": "793"
}
],
"symlink_target": ""
} |
import re
from nova import exception
from nova.i18n import _
# Define the minimum and maximum version of the API across all of the
# REST API. The format of the version is:
# X.Y where:
#
# - X will only be changed if a significant backwards incompatible API
# change is made which affects the API as whole. That is, something
# that is only very very rarely incremented.
#
# - Y when you make any change to the API. Note that this includes
# semantic changes which may not affect the input or output formats or
# even originate in the API code layer. We are not distinguishing
# between backwards compatible and backwards incompatible changes in
# the versioning system. The documentation must make clear which
# changes are backwards compatible and which are backwards
# incompatible.
#
# You must update the API version history string below with a one or
# two line description as well as update rest_api_version_history.rst
REST_API_VERSION_HISTORY = """REST API Version History:
* 2.1 - Initial version. Equivalent to v2.0 code
* 2.2 - Adds (keypair) type parameter for os-keypairs plugin
Fixes success status code for create/delete a keypair method
* 2.3 - Exposes additional os-extended-server-attributes
Exposes delete_on_termination for os-extended-volumes
* 2.4 - Exposes reserved field in os-fixed-ips.
* 2.5 - Allow server search option ip6 for non-admin
* 2.6 - Consolidate the APIs for getting remote consoles
* 2.7 - Check flavor type before adding tenant access.
* 2.8 - Add new protocol for VM console (mks)
* 2.9 - Exposes lock information in server details.
* 2.10 - Allow admins to query, create and delete keypairs owned by any
user.
* 2.11 - Exposes forced_down attribute for os-services
* 2.12 - Exposes VIF net_id in os-virtual-interfaces
* 2.13 - Add project id and user id information for os-server-groups API
* 2.14 - Remove onSharedStorage from evacuate request body and remove
adminPass from the response body
* 2.15 - Add soft-affinity and soft-anti-affinity policies
* 2.16 - Exposes host_status for servers/detail and servers/{server_id}
* 2.17 - Add trigger_crash_dump to server actions
* 2.18 - Makes project_id optional in v2.1
* 2.19 - Allow user to set and get the server description
* 2.20 - Add attach and detach volume operations for instances in shelved
and shelved_offloaded state
* 2.21 - Make os-instance-actions read deleted instances
* 2.22 - Add API to force live migration to complete
* 2.23 - Add index/show API for server migrations.
Also add migration_type for /os-migrations and add ref link for it
when the migration is an in progress live migration.
* 2.24 - Add API to cancel a running live migration
* 2.25 - Make block_migration support 'auto' and remove
disk_over_commit for os-migrateLive.
* 2.26 - Adds support of server tags
* 2.27 - Adds support for new-style microversion headers while
keeping support for the original style.
* 2.28 - Changes compute_node.cpu_info from string to object
* 2.29 - Add a force flag in evacuate request body and change the
behaviour for the host flag by calling the scheduler.
* 2.30 - Add a force flag in live-migrate request body and change the
behaviour for the host flag by calling the scheduler.
* 2.31 - Fix os-console-auth-tokens to work for all console types.
* 2.32 - Add tag to networks and block_device_mapping_v2 in server boot
request body.
* 2.33 - Add pagination support for hypervisors.
* 2.34 - Checks before live-migration are made in an asynchronous way.
The os-migrateLive action does not throw badRequest if
pre-checks fail. The verification result is available over
instance-actions.
* 2.35 - Adds keypairs pagination support.
* 2.36 - Deprecates all the APIs which proxy to another service, and the
fping API.
* 2.37 - Adds support for auto-allocating networking, otherwise known as
"Get me a Network". Also enforces server.networks.uuid to be in
UUID format.
* 2.38 - Add a condition to return HTTPBadRequest if invalid status is
provided for listing servers.
"""
# The minimum and maximum versions of the API supported
# The default API version request is defined to be the
# minimum version of the API supported.
# Note(cyeoh): This only applies for the v2.1 API once microversions
# support is fully merged. It does not affect the V2 API.
_MIN_API_VERSION = "2.1"
_MAX_API_VERSION = "2.38"
DEFAULT_API_VERSION = _MIN_API_VERSION
# All the proxy APIs related to network, images and baremetal
# were deprecated starting from 2.36.
MAX_PROXY_API_SUPPORT_VERSION = '2.35'
MIN_WITHOUT_PROXY_API_SUPPORT_VERSION = '2.36'
# NOTE(cyeoh): min and max versions declared as functions so we can
# mock them for unittests. Do not use the constants directly anywhere
# else.
def min_api_version():
return APIVersionRequest(_MIN_API_VERSION)
def max_api_version():
return APIVersionRequest(_MAX_API_VERSION)
def is_supported(req, min_version=_MIN_API_VERSION,
max_version=_MAX_API_VERSION):
"""Check if API request version satisfies version restrictions.
:param req: request object
:param min_version: minimal version of API needed for correct
request processing
:param max_version: maximum version of API needed for correct
request processing
:returns: True if the request satisfies the minimum and maximum API
version requirements, False otherwise.
"""
return (APIVersionRequest(max_version) >= req.api_version_request >=
APIVersionRequest(min_version))
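# A minimal sketch of how a caller might gate a proxy-style API on the 2.36
# deprecation boundary defined above; `req` is assumed to be a nova API
# request object carrying an `api_version_request` attribute, as
# is_supported() expects.
def _proxy_api_still_allowed(req):
    return is_supported(req, max_version=MAX_PROXY_API_SUPPORT_VERSION)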
class APIVersionRequest(object):
"""This class represents an API Version Request with convenience
methods for manipulation and comparison of version
numbers that we need to do to implement microversions.
"""
def __init__(self, version_string=None):
"""Create an API version request object.
:param version_string: String representation of APIVersionRequest.
The correct format is 'X.Y', where 'X' and 'Y' are int values. Pass
None to create a null APIVersionRequest, which compares equal to
version 0.0.
"""
self.ver_major = 0
self.ver_minor = 0
if version_string is not None:
match = re.match(r"^([1-9]\d*)\.([1-9]\d*|0)$",
version_string)
if match:
self.ver_major = int(match.group(1))
self.ver_minor = int(match.group(2))
else:
raise exception.InvalidAPIVersionString(version=version_string)
def __str__(self):
"""Debug/Logging representation of object."""
return ("API Version Request Major: %s, Minor: %s"
% (self.ver_major, self.ver_minor))
def is_null(self):
return self.ver_major == 0 and self.ver_minor == 0
def _format_type_error(self, other):
return TypeError(_("'%(other)s' should be an instance of '%(cls)s'") %
{"other": other, "cls": self.__class__})
def __lt__(self, other):
if not isinstance(other, APIVersionRequest):
raise self._format_type_error(other)
return ((self.ver_major, self.ver_minor) <
(other.ver_major, other.ver_minor))
def __eq__(self, other):
if not isinstance(other, APIVersionRequest):
raise self._format_type_error(other)
return ((self.ver_major, self.ver_minor) ==
(other.ver_major, other.ver_minor))
def __gt__(self, other):
if not isinstance(other, APIVersionRequest):
raise self._format_type_error(other)
return ((self.ver_major, self.ver_minor) >
(other.ver_major, other.ver_minor))
def __le__(self, other):
return self < other or self == other
def __ne__(self, other):
return not self.__eq__(other)
def __ge__(self, other):
return self > other or self == other
def matches(self, min_version, max_version):
"""Returns whether the version object represents a version
greater than or equal to the minimum version and less than
or equal to the maximum version.
@param min_version: Minimum acceptable version.
@param max_version: Maximum acceptable version.
@returns: boolean
If min_version is null then there is no minimum limit.
If max_version is null then there is no maximum limit.
If self is null then raise ValueError
"""
if self.is_null():
raise ValueError
if max_version.is_null() and min_version.is_null():
return True
elif max_version.is_null():
return min_version <= self
elif min_version.is_null():
return self <= max_version
else:
return min_version <= self <= max_version
def get_string(self):
"""Converts object to string representation which if used to create
an APIVersionRequest object results in the same version request.
"""
if self.is_null():
raise ValueError
return "%s.%s" % (self.ver_major, self.ver_minor)
| {
"content_hash": "b16d8be4129d85a6fb00aaaf64883b29",
"timestamp": "",
"source": "github",
"line_count": 229,
"max_line_length": 79,
"avg_line_length": 41.4061135371179,
"alnum_prop": 0.6568234549673064,
"repo_name": "cloudbase/nova",
"id": "6a3865f96796fa137d06698bd7f83485e83082f3",
"size": "10084",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "nova/api/openstack/api_version_request.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "PHP",
"bytes": "3325"
},
{
"name": "Python",
"bytes": "18199370"
},
{
"name": "Shell",
"bytes": "37074"
},
{
"name": "Smarty",
"bytes": "299657"
}
],
"symlink_target": ""
} |
from base import Ref, QuickbooksManagedObject
class Item(QuickbooksManagedObject):
"""
QBO definition: An item is a thing that your company buys, sells, or re-sells, such as products and services.
An item is shown as a line on an invoice or other sales form. The Item.Type attribute, which specifies how
the item is used, has one of the following values:
Inventory - This type tracks merchandise that your business purchases, stocks, and re-sells as inventory.
QuickBooks tracks the current number of inventory items in stock, cost of goods sold, and the asset value of
the inventory after the purchase and sale of every item.
Service - This type tracks services that you charge on the purchase and tracks merchandise you sell and buy that
is not tracked as inventory. For example, specialized labor, consulting hours, and professional fees.
"""
class_dict = {
"AssetAccountRef": Ref,
"ExpenseAccountRef": Ref,
"IncomeAccountRef": Ref
}
qbo_object_name = "Item"
def __init__(self):
super(Item, self).__init__()
self.Name = ""
self.Description = ""
self.Active = True
self.FullyQualifiedName = ""
self.Taxable = ""
self.UnitPrice = ""
self.Type = ""
self.PurchaseDesc = ""
self.PurchaseCost = 0
self.TrackQtyOnHand = True
self.QtyOnHand = 0
self.InvStartDate = ""
self.AssetAccountRef = None
self.ExpenseAccountRef = None
self.IncomeAccountRef = None
def __unicode__(self):
return self.Name
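# A minimal construction sketch for the class above; the field values are
# illustrative, and a real Service item would normally also carry an
# IncomeAccountRef before being saved to QBO.
def _example_service_item():
    item = Item()
    item.Name = "Consulting hours"
    item.Type = "Service"
    item.UnitPrice = 150
    item.Taxable = False
    item.TrackQtyOnHand = False
    return item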
| {
"content_hash": "e2db13490d0bc9b346bc18478d7a76b3",
"timestamp": "",
"source": "github",
"line_count": 48,
"max_line_length": 119,
"avg_line_length": 34.083333333333336,
"alnum_prop": 0.6491442542787286,
"repo_name": "ferdiaz/python-quickbooks",
"id": "d81a7c77ff3437c14136c1902ac9466a3eecc3cd",
"size": "1636",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "quickbooks/objects/item.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "164"
},
{
"name": "Python",
"bytes": "92199"
}
],
"symlink_target": ""
} |
from __future__ import print_function
from os import environ
from twisted.internet.defer import inlineCallbacks
from autobahn.wamp.types import PublishOptions
from autobahn.twisted.util import sleep
from autobahn.twisted.wamp import ApplicationSession, ApplicationRunner
class Component(ApplicationSession):
"""
An application component that publishes an event every second.
"""
@inlineCallbacks
def onJoin(self, details):
print("session attached")
def on_event(i):
print("Got event: {}".format(i))
yield self.subscribe(on_event, u'com.myapp.topic1')
counter = 0
while True:
print("publish: com.myapp.topic1", counter)
pub_options = PublishOptions(
acknowledge=True,
disclose_me=True,
exclude_me=False
)
publication = yield self.publish(
u'com.myapp.topic1', counter,
options=pub_options,
)
print("Published with publication ID {}".format(publication.id))
counter += 1
yield sleep(1)
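# A minimal subscriber counterpart sketch (hypothetical; this file only ships
# the publisher). Because the backend publishes with exclude_me=False it also
# receives its own events through on_event above; a separate session like this
# one would receive the same counter values.
class ListenerComponent(ApplicationSession):
    @inlineCallbacks
    def onJoin(self, details):
        def on_counter(i):
            print("listener got counter: {}".format(i))
        yield self.subscribe(on_counter, u'com.myapp.topic1')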
if __name__ == '__main__':
runner = ApplicationRunner(
environ.get("AUTOBAHN_DEMO_ROUTER", u"ws://127.0.0.1:8080/ws"),
u"crossbardemo",
debug_wamp=False, # optional; log many WAMP details
debug=False, # optional; log even more details
)
runner.run(Component)
| {
"content_hash": "063b037de7e8567492b63bf6a6531329",
"timestamp": "",
"source": "github",
"line_count": 49,
"max_line_length": 76,
"avg_line_length": 29.53061224489796,
"alnum_prop": 0.6067726330338632,
"repo_name": "inirudebwoy/AutobahnPython",
"id": "35e8df5def61eef9fa3d38461eb4df24f847a531",
"size": "2724",
"binary": false,
"copies": "8",
"ref": "refs/heads/master",
"path": "examples/twisted/wamp/pubsub/options/backend.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "3345"
},
{
"name": "Python",
"bytes": "890704"
}
],
"symlink_target": ""
} |
from __future__ import division, absolute_import, print_function
import os
import re
import sys
import copy
import glob
import atexit
import tempfile
import subprocess
import shutil
import distutils
from distutils.errors import DistutilsError
try:
from threading import local as tlocal
except ImportError:
from dummy_threading import local as tlocal
# stores temporary directory of each thread to only create one per thread
_tdata = tlocal()
# store all created temporary directories so they can be deleted on exit
_tmpdirs = []
def clean_up_temporary_directory():
for d in _tmpdirs:
try:
shutil.rmtree(d)
except OSError:
pass
atexit.register(clean_up_temporary_directory)
try:
set
except NameError:
from sets import Set as set
from numpy.distutils.compat import get_exception
from numpy.compat import basestring
from numpy.compat import npy_load_module
__all__ = ['Configuration', 'get_numpy_include_dirs', 'default_config_dict',
'dict_append', 'appendpath', 'generate_config_py',
'get_cmd', 'allpath', 'get_mathlibs',
'terminal_has_colors', 'red_text', 'green_text', 'yellow_text',
'blue_text', 'cyan_text', 'cyg2win32', 'mingw32', 'all_strings',
'has_f_sources', 'has_cxx_sources', 'filter_sources',
'get_dependencies', 'is_local_src_dir', 'get_ext_source_files',
'get_script_files', 'get_lib_source_files', 'get_data_files',
'dot_join', 'get_frame', 'minrelpath', 'njoin',
'is_sequence', 'is_string', 'as_list', 'gpaths', 'get_language',
'quote_args', 'get_build_architecture', 'get_info', 'get_pkg_info',
'get_num_build_jobs']
class InstallableLib(object):
"""
Container to hold information on an installable library.
Parameters
----------
name : str
Name of the installed library.
build_info : dict
Dictionary holding build information.
target_dir : str
Absolute path specifying where to install the library.
See Also
--------
Configuration.add_installed_library
Notes
-----
The three parameters are stored as attributes with the same names.
"""
def __init__(self, name, build_info, target_dir):
self.name = name
self.build_info = build_info
self.target_dir = target_dir
def get_num_build_jobs():
"""
Get the number of parallel build jobs set by the --parallel command line
argument of setup.py.
If the command did not receive a setting, the environment variable
NPY_NUM_BUILD_JOBS is checked; if that is unset, 1 is returned.
Returns
-------
out : int
number of parallel jobs that can be run
"""
from numpy.distutils.core import get_distribution
envjobs = int(os.environ.get("NPY_NUM_BUILD_JOBS", 1))
dist = get_distribution()
# may be None during configuration
if dist is None:
return envjobs
# any of these three may have the job set, take the largest
cmdattr = (getattr(dist.get_command_obj('build'), 'parallel', None),
getattr(dist.get_command_obj('build_ext'), 'parallel', None),
getattr(dist.get_command_obj('build_clib'), 'parallel', None))
if all(x is None for x in cmdattr):
return envjobs
else:
return max(x for x in cmdattr if x is not None)
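# For example, either of these (assumed invocations of a numpy.distutils based
# setup.py) results in 4 parallel build jobs:
#     python setup.py build --parallel=4
#     NPY_NUM_BUILD_JOBS=4 python setup.py build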
def quote_args(args):
# don't use _nt_quote_args as it does not check if
# args items already have quotes or not.
args = list(args)
for i in range(len(args)):
a = args[i]
if ' ' in a and a[0] not in '"\'':
args[i] = '"%s"' % (a)
return args
def allpath(name):
"Convert a /-separated pathname to one using the OS's path separator."
splitted = name.split('/')
return os.path.join(*splitted)
def rel_path(path, parent_path):
"""Return path relative to parent_path."""
# Use realpath to avoid issues with symlinked dirs (see gh-7707)
pd = os.path.realpath(os.path.abspath(parent_path))
apath = os.path.realpath(os.path.abspath(path))
if len(apath) < len(pd):
return path
if apath == pd:
return ''
if pd == apath[:len(pd)]:
assert apath[len(pd)] in [os.sep], repr((path, apath[len(pd)]))
path = apath[len(pd)+1:]
return path
def get_path_from_frame(frame, parent_path=None):
"""Return path of the module given a frame object from the call stack.
Returned path is relative to parent_path when given,
otherwise it is absolute path.
"""
# First, try to find if the file name is in the frame.
try:
caller_file = eval('__file__', frame.f_globals, frame.f_locals)
d = os.path.dirname(os.path.abspath(caller_file))
except NameError:
# __file__ is not defined, so let's try __name__. We try this second
# because setuptools spoofs __name__ to be '__main__' even though
# sys.modules['__main__'] might be something else, like easy_install(1).
caller_name = eval('__name__', frame.f_globals, frame.f_locals)
__import__(caller_name)
mod = sys.modules[caller_name]
if hasattr(mod, '__file__'):
d = os.path.dirname(os.path.abspath(mod.__file__))
else:
# we're probably running setup.py as execfile("setup.py")
# (likely we're building an egg)
d = os.path.abspath('.')
# hmm, should we use sys.argv[0] like in __builtin__ case?
if parent_path is not None:
d = rel_path(d, parent_path)
return d or '.'
def njoin(*path):
"""Join two or more pathname components +
- convert a /-separated pathname to one using the OS's path separator.
- resolve `..` and `.` from path.
Either passing n arguments as in njoin('a','b'), or a sequence
of n names as in njoin(['a','b']) is handled, or a mixture of such arguments.
"""
paths = []
for p in path:
if is_sequence(p):
# njoin(['a', 'b'], 'c')
paths.append(njoin(*p))
else:
assert is_string(p)
paths.append(p)
path = paths
if not path:
# njoin()
joined = ''
else:
# njoin('a', 'b')
joined = os.path.join(*path)
if os.path.sep != '/':
joined = joined.replace('/', os.path.sep)
return minrelpath(joined)
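# For example, assuming a POSIX os.sep:
#     njoin('a', 'b')        -> 'a/b'
#     njoin(['a', 'b'], 'c') -> 'a/b/c'
#     njoin('a', '../b')     -> 'b'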
def get_mathlibs(path=None):
"""Return the MATHLIB line from numpyconfig.h
"""
if path is not None:
config_file = os.path.join(path, '_numpyconfig.h')
else:
# Look for the file in each of the numpy include directories.
dirs = get_numpy_include_dirs()
for path in dirs:
fn = os.path.join(path, '_numpyconfig.h')
if os.path.exists(fn):
config_file = fn
break
else:
raise DistutilsError('_numpyconfig.h not found in numpy include '
'dirs %r' % (dirs,))
fid = open(config_file)
mathlibs = []
s = '#define MATHLIB'
for line in fid:
if line.startswith(s):
value = line[len(s):].strip()
if value:
mathlibs.extend(value.split(','))
fid.close()
return mathlibs
def minrelpath(path):
"""Resolve `..` and '.' from path.
"""
if not is_string(path):
return path
if '.' not in path:
return path
l = path.split(os.sep)
while l:
try:
i = l.index('.', 1)
except ValueError:
break
del l[i]
j = 1
while l:
try:
i = l.index('..', j)
except ValueError:
break
if l[i-1]=='..':
j += 1
else:
del l[i], l[i-1]
j = 1
if not l:
return ''
return os.sep.join(l)
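# For example, assuming a POSIX os.sep:
#     minrelpath('a/./b')       -> 'a/b'
#     minrelpath('a/../b')      -> 'b'
#     minrelpath('a/b/../../c') -> 'c'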
def _fix_paths(paths, local_path, include_non_existing):
assert is_sequence(paths), repr(type(paths))
new_paths = []
assert not is_string(paths), repr(paths)
for n in paths:
if is_string(n):
if '*' in n or '?' in n:
p = glob.glob(n)
p2 = glob.glob(njoin(local_path, n))
if p2:
new_paths.extend(p2)
elif p:
new_paths.extend(p)
else:
if include_non_existing:
new_paths.append(n)
print('could not resolve pattern in %r: %r' %
(local_path, n))
else:
n2 = njoin(local_path, n)
if os.path.exists(n2):
new_paths.append(n2)
else:
if os.path.exists(n):
new_paths.append(n)
elif include_non_existing:
new_paths.append(n)
if not os.path.exists(n):
print('non-existing path in %r: %r' %
(local_path, n))
elif is_sequence(n):
new_paths.extend(_fix_paths(n, local_path, include_non_existing))
else:
new_paths.append(n)
return [minrelpath(p) for p in new_paths]
def gpaths(paths, local_path='', include_non_existing=True):
"""Apply glob to paths and prepend local_path if needed.
"""
if is_string(paths):
paths = (paths,)
return _fix_paths(paths, local_path, include_non_existing)
def make_temp_file(suffix='', prefix='', text=True):
if not hasattr(_tdata, 'tempdir'):
_tdata.tempdir = tempfile.mkdtemp()
_tmpdirs.append(_tdata.tempdir)
fid, name = tempfile.mkstemp(suffix=suffix,
prefix=prefix,
dir=_tdata.tempdir,
text=text)
fo = os.fdopen(fid, 'w')
return fo, name
# Hooks for colored terminal output.
# See also http://www.livinglogic.de/Python/ansistyle
def terminal_has_colors():
if sys.platform=='cygwin' and 'USE_COLOR' not in os.environ:
# Avoid importing curses that causes illegal operation
# with a message:
# PYTHON2 caused an invalid page fault in
# module CYGNURSES7.DLL as 015f:18bbfc28
# Details: Python 2.3.3 [GCC 3.3.1 (cygming special)]
# ssh to Win32 machine from debian
# curses.version is 2.2
# CYGWIN_98-4.10, release 1.5.7(0.109/3/2))
return 0
if hasattr(sys.stdout, 'isatty') and sys.stdout.isatty():
try:
import curses
curses.setupterm()
if (curses.tigetnum("colors") >= 0
and curses.tigetnum("pairs") >= 0
and ((curses.tigetstr("setf") is not None
and curses.tigetstr("setb") is not None)
or (curses.tigetstr("setaf") is not None
and curses.tigetstr("setab") is not None)
or curses.tigetstr("scp") is not None)):
return 1
except Exception:
pass
return 0
if terminal_has_colors():
_colour_codes = dict(black=0, red=1, green=2, yellow=3,
blue=4, magenta=5, cyan=6, white=7, default=9)
def colour_text(s, fg=None, bg=None, bold=False):
seq = []
if bold:
seq.append('1')
if fg:
fgcode = 30 + _colour_codes.get(fg.lower(), 0)
seq.append(str(fgcode))
if bg:
bgcode = 40 + _colour_codes.get(bg.lower(), 7)
seq.append(str(bgcode))
if seq:
return '\x1b[%sm%s\x1b[0m' % (';'.join(seq), s)
else:
return s
else:
def colour_text(s, fg=None, bg=None):
return s
def default_text(s):
return colour_text(s, 'default')
def red_text(s):
return colour_text(s, 'red')
def green_text(s):
return colour_text(s, 'green')
def yellow_text(s):
return colour_text(s, 'yellow')
def cyan_text(s):
return colour_text(s, 'cyan')
def blue_text(s):
return colour_text(s, 'blue')
#########################
def cyg2win32(path):
if sys.platform=='cygwin' and path.startswith('/cygdrive'):
path = path[10] + ':' + os.path.normcase(path[11:])
return path
def mingw32():
"""Return true when using mingw32 environment.
"""
if sys.platform=='win32':
if os.environ.get('OSTYPE', '')=='msys':
return True
if os.environ.get('MSYSTEM', '')=='MINGW32':
return True
return False
def msvc_runtime_library():
"Return name of MSVC runtime library if Python was built with MSVC >= 7"
msc_pos = sys.version.find('MSC v.')
if msc_pos != -1:
msc_ver = sys.version[msc_pos+6:msc_pos+10]
lib = {'1300': 'msvcr70', # MSVC 7.0
'1310': 'msvcr71', # MSVC 7.1
'1400': 'msvcr80', # MSVC 8
'1500': 'msvcr90', # MSVC 9 (VS 2008)
'1600': 'msvcr100', # MSVC 10 (aka 2010)
}.get(msc_ver, None)
else:
lib = None
return lib
#########################
#XXX need support for .C that is also C++
cxx_ext_match = re.compile(r'.*[.](cpp|cxx|cc)\Z', re.I).match
fortran_ext_match = re.compile(r'.*[.](f90|f95|f77|for|ftn|f)\Z', re.I).match
f90_ext_match = re.compile(r'.*[.](f90|f95)\Z', re.I).match
f90_module_name_match = re.compile(r'\s*module\s*(?P<name>[\w_]+)', re.I).match
def _get_f90_modules(source):
"""Return a list of Fortran f90 module names that
given source file defines.
"""
if not f90_ext_match(source):
return []
modules = []
f = open(source, 'r')
for line in f:
m = f90_module_name_match(line)
if m:
name = m.group('name')
modules.append(name)
# break # XXX can we assume that there is one module per file?
f.close()
return modules
def is_string(s):
return isinstance(s, basestring)
def all_strings(lst):
"""Return True if all items in lst are string objects. """
for item in lst:
if not is_string(item):
return False
return True
def is_sequence(seq):
if is_string(seq):
return False
try:
len(seq)
except Exception:
return False
return True
def is_glob_pattern(s):
return is_string(s) and ('*' in s or '?' in s)
def as_list(seq):
if is_sequence(seq):
return list(seq)
else:
return [seq]
def get_language(sources):
# not used in numpy/scipy packages, use build_ext.detect_language instead
"""Determine language value (c,f77,f90) from sources """
language = None
for source in sources:
if isinstance(source, str):
if f90_ext_match(source):
language = 'f90'
break
elif fortran_ext_match(source):
language = 'f77'
return language
def has_f_sources(sources):
"""Return True if sources contains Fortran files """
for source in sources:
if fortran_ext_match(source):
return True
return False
def has_cxx_sources(sources):
"""Return True if sources contains C++ files """
for source in sources:
if cxx_ext_match(source):
return True
return False
def filter_sources(sources):
"""Return four lists of filenames containing
C, C++, Fortran, and Fortran 90 module sources,
respectively.
"""
c_sources = []
cxx_sources = []
f_sources = []
fmodule_sources = []
for source in sources:
if fortran_ext_match(source):
modules = _get_f90_modules(source)
if modules:
fmodule_sources.append(source)
else:
f_sources.append(source)
elif cxx_ext_match(source):
cxx_sources.append(source)
else:
c_sources.append(source)
return c_sources, cxx_sources, f_sources, fmodule_sources
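# For example, filter_sources(['x.c', 'y.cpp', 'z.f', 'mod.f90']) returns
# (['x.c'], ['y.cpp'], ['z.f'], ['mod.f90']) when mod.f90 defines a Fortran
# module. Note that .f90/.f95 sources are opened and scanned for `module`
# statements, so they must exist on disk; an .f90 file without a module
# statement lands in the third (plain Fortran) list instead.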
def _get_headers(directory_list):
# get *.h files from list of directories
headers = []
for d in directory_list:
head = glob.glob(os.path.join(d, "*.h")) #XXX: *.hpp files??
headers.extend(head)
return headers
def _get_directories(list_of_sources):
# get unique directories from list of sources.
direcs = []
for f in list_of_sources:
d = os.path.split(f)
if d[0] != '' and not d[0] in direcs:
direcs.append(d[0])
return direcs
def get_dependencies(sources):
#XXX scan sources for include statements
return _get_headers(_get_directories(sources))
def is_local_src_dir(directory):
"""Return true if directory is local directory.
"""
if not is_string(directory):
return False
abs_dir = os.path.abspath(directory)
c = os.path.commonprefix([os.getcwd(), abs_dir])
new_dir = abs_dir[len(c):].split(os.sep)
if new_dir and not new_dir[0]:
new_dir = new_dir[1:]
if new_dir and new_dir[0]=='build':
return False
new_dir = os.sep.join(new_dir)
return os.path.isdir(new_dir)
def general_source_files(top_path):
pruned_directories = {'CVS':1, '.svn':1, 'build':1}
prune_file_pat = re.compile(r'(?:[~#]|\.py[co]|\.o)$')
for dirpath, dirnames, filenames in os.walk(top_path, topdown=True):
pruned = [ d for d in dirnames if d not in pruned_directories ]
dirnames[:] = pruned
for f in filenames:
if not prune_file_pat.search(f):
yield os.path.join(dirpath, f)
def general_source_directories_files(top_path):
"""Return a directory name relative to top_path and
files contained.
"""
pruned_directories = ['CVS', '.svn', 'build']
prune_file_pat = re.compile(r'(?:[~#]|\.py[co]|\.o)$')
for dirpath, dirnames, filenames in os.walk(top_path, topdown=True):
pruned = [ d for d in dirnames if d not in pruned_directories ]
dirnames[:] = pruned
for d in dirnames:
dpath = os.path.join(dirpath, d)
rpath = rel_path(dpath, top_path)
files = []
for f in os.listdir(dpath):
fn = os.path.join(dpath, f)
if os.path.isfile(fn) and not prune_file_pat.search(fn):
files.append(fn)
yield rpath, files
dpath = top_path
rpath = rel_path(dpath, top_path)
filenames = [os.path.join(dpath, f) for f in os.listdir(dpath) \
if not prune_file_pat.search(f)]
files = [f for f in filenames if os.path.isfile(f)]
yield rpath, files
def get_ext_source_files(ext):
# Get sources and any include files in the same directory.
filenames = []
sources = [_m for _m in ext.sources if is_string(_m)]
filenames.extend(sources)
filenames.extend(get_dependencies(sources))
for d in ext.depends:
if is_local_src_dir(d):
filenames.extend(list(general_source_files(d)))
elif os.path.isfile(d):
filenames.append(d)
return filenames
def get_script_files(scripts):
scripts = [_m for _m in scripts if is_string(_m)]
return scripts
def get_lib_source_files(lib):
filenames = []
sources = lib[1].get('sources', [])
sources = [_m for _m in sources if is_string(_m)]
filenames.extend(sources)
filenames.extend(get_dependencies(sources))
depends = lib[1].get('depends', [])
for d in depends:
if is_local_src_dir(d):
filenames.extend(list(general_source_files(d)))
elif os.path.isfile(d):
filenames.append(d)
return filenames
def get_shared_lib_extension(is_python_ext=False):
"""Return the correct file extension for shared libraries.
Parameters
----------
is_python_ext : bool, optional
Whether the shared library is a Python extension. Default is False.
Returns
-------
so_ext : str
The shared library extension.
Notes
-----
For Python shared libs, `so_ext` will typically be '.so' on Linux and OS X,
and '.pyd' on Windows. For Python >= 3.2 `so_ext` has a tag prepended on
POSIX systems according to PEP 3149. For Python 3.2 this is implemented on
Linux, but not on OS X.
"""
confvars = distutils.sysconfig.get_config_vars()
# SO is deprecated in 3.3.1, use EXT_SUFFIX instead
so_ext = confvars.get('EXT_SUFFIX', None)
if so_ext is None:
so_ext = confvars.get('SO', '')
if not is_python_ext:
# hardcode known values, config vars (including SHLIB_SUFFIX) are
# unreliable (see #3182)
# darwin, windows and debug linux are wrong in 3.3.1 and older
if (sys.platform.startswith('linux') or
sys.platform.startswith('gnukfreebsd')):
so_ext = '.so'
elif sys.platform.startswith('darwin'):
so_ext = '.dylib'
elif sys.platform.startswith('win'):
so_ext = '.dll'
else:
# fall back to config vars for unknown platforms
# fix long extension for Python >=3.2, see PEP 3149.
if 'SOABI' in confvars:
# Does nothing unless SOABI config var exists
so_ext = so_ext.replace('.' + confvars.get('SOABI'), '', 1)
return so_ext
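# Typical results (illustrative; the Python-extension suffix depends on the
# interpreter's EXT_SUFFIX config var):
#     get_shared_lib_extension(is_python_ext=True)   # e.g. '.so' or '.pyd'
#     get_shared_lib_extension(is_python_ext=False)  # '.so', '.dylib' or '.dll'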
def get_data_files(data):
if is_string(data):
return [data]
sources = data[1]
filenames = []
for s in sources:
if hasattr(s, '__call__'):
continue
if is_local_src_dir(s):
filenames.extend(list(general_source_files(s)))
elif is_string(s):
if os.path.isfile(s):
filenames.append(s)
else:
print('Not existing data file:', s)
else:
raise TypeError(repr(s))
return filenames
def dot_join(*args):
return '.'.join([a for a in args if a])
def get_frame(level=0):
"""Return frame object from call stack with given level.
"""
try:
return sys._getframe(level+1)
except AttributeError:
frame = sys.exc_info()[2].tb_frame
for _ in range(level+1):
frame = frame.f_back
return frame
######################
class Configuration(object):
_list_keys = ['packages', 'ext_modules', 'data_files', 'include_dirs',
'libraries', 'headers', 'scripts', 'py_modules',
'installed_libraries', 'define_macros']
_dict_keys = ['package_dir', 'installed_pkg_config']
_extra_keys = ['name', 'version']
numpy_include_dirs = []
def __init__(self,
package_name=None,
parent_name=None,
top_path=None,
package_path=None,
caller_level=1,
setup_name='setup.py',
**attrs):
"""Construct configuration instance of a package.
package_name -- name of the package
Ex.: 'distutils'
parent_name -- name of the parent package
Ex.: 'numpy'
top_path -- directory of the toplevel package
Ex.: the directory where the numpy package source sits
package_path -- directory of package. Will be computed by magic from the
directory of the caller module if not specified
Ex.: the directory where numpy.distutils is
caller_level -- frame level to caller namespace, internal parameter.
"""
self.name = dot_join(parent_name, package_name)
self.version = None
caller_frame = get_frame(caller_level)
self.local_path = get_path_from_frame(caller_frame, top_path)
# local_path -- directory of a file (usually setup.py) that
# defines a configuration() function.
if top_path is None:
top_path = self.local_path
self.local_path = ''
if package_path is None:
package_path = self.local_path
elif os.path.isdir(njoin(self.local_path, package_path)):
package_path = njoin(self.local_path, package_path)
if not os.path.isdir(package_path or '.'):
raise ValueError("%r is not a directory" % (package_path,))
self.top_path = top_path
self.package_path = package_path
# this is the relative path in the installed package
self.path_in_package = os.path.join(*self.name.split('.'))
self.list_keys = self._list_keys[:]
self.dict_keys = self._dict_keys[:]
for n in self.list_keys:
v = copy.copy(attrs.get(n, []))
setattr(self, n, as_list(v))
for n in self.dict_keys:
v = copy.copy(attrs.get(n, {}))
setattr(self, n, v)
known_keys = self.list_keys + self.dict_keys
self.extra_keys = self._extra_keys[:]
for n in attrs.keys():
if n in known_keys:
continue
a = attrs[n]
setattr(self, n, a)
if isinstance(a, list):
self.list_keys.append(n)
elif isinstance(a, dict):
self.dict_keys.append(n)
else:
self.extra_keys.append(n)
if os.path.exists(njoin(package_path, '__init__.py')):
self.packages.append(self.name)
self.package_dir[self.name] = package_path
self.options = dict(
ignore_setup_xxx_py = False,
assume_default_configuration = False,
delegate_options_to_subpackages = False,
quiet = False,
)
caller_instance = None
for i in range(1, 3):
try:
f = get_frame(i)
except ValueError:
break
try:
caller_instance = eval('self', f.f_globals, f.f_locals)
break
except NameError:
pass
if isinstance(caller_instance, self.__class__):
if caller_instance.options['delegate_options_to_subpackages']:
self.set_options(**caller_instance.options)
self.setup_name = setup_name
def todict(self):
"""
Return a dictionary compatible with the keyword arguments of distutils
setup function.
Examples
--------
>>> setup(**config.todict()) #doctest: +SKIP
"""
self._optimize_data_files()
d = {}
known_keys = self.list_keys + self.dict_keys + self.extra_keys
for n in known_keys:
a = getattr(self, n)
if a:
d[n] = a
return d
def info(self, message):
if not self.options['quiet']:
print(message)
def warn(self, message):
sys.stderr.write('Warning: %s' % (message,))
def set_options(self, **options):
"""
Configure Configuration instance.
The following options are available:
- ignore_setup_xxx_py
- assume_default_configuration
- delegate_options_to_subpackages
- quiet
"""
for key, value in options.items():
if key in self.options:
self.options[key] = value
else:
raise ValueError('Unknown option: '+key)
def get_distribution(self):
"""Return the distutils distribution object for self."""
from numpy.distutils.core import get_distribution
return get_distribution()
def _wildcard_get_subpackage(self, subpackage_name,
parent_name,
caller_level = 1):
l = subpackage_name.split('.')
subpackage_path = njoin([self.local_path]+l)
dirs = [_m for _m in glob.glob(subpackage_path) if os.path.isdir(_m)]
config_list = []
for d in dirs:
if not os.path.isfile(njoin(d, '__init__.py')):
continue
if 'build' in d.split(os.sep):
continue
n = '.'.join(d.split(os.sep)[-len(l):])
c = self.get_subpackage(n,
parent_name = parent_name,
caller_level = caller_level+1)
config_list.extend(c)
return config_list
def _get_configuration_from_setup_py(self, setup_py,
subpackage_name,
subpackage_path,
parent_name,
caller_level = 1):
# In case setup_py imports local modules:
sys.path.insert(0, os.path.dirname(setup_py))
try:
setup_name = os.path.splitext(os.path.basename(setup_py))[0]
n = dot_join(self.name, subpackage_name, setup_name)
setup_module = npy_load_module('_'.join(n.split('.')),
setup_py,
('.py', 'U', 1))
if not hasattr(setup_module, 'configuration'):
if not self.options['assume_default_configuration']:
self.warn('Assuming default configuration '\
'(%s does not define configuration())'\
% (setup_module))
config = Configuration(subpackage_name, parent_name,
self.top_path, subpackage_path,
caller_level = caller_level + 1)
else:
pn = dot_join(*([parent_name] + subpackage_name.split('.')[:-1]))
args = (pn,)
# configuration() may accept (parent_package) or (parent_package, top_path);
# pass top_path only when it takes more than one argument.
if setup_module.configuration.__code__.co_argcount > 1:
args = args + (self.top_path,)
config = setup_module.configuration(*args)
if config.name!=dot_join(parent_name, subpackage_name):
self.warn('Subpackage %r configuration returned as %r' % \
(dot_join(parent_name, subpackage_name), config.name))
finally:
del sys.path[0]
return config
def get_subpackage(self,subpackage_name,
subpackage_path=None,
parent_name=None,
caller_level = 1):
"""Return list of subpackage configurations.
Parameters
----------
subpackage_name : str or None
Name of the subpackage to get the configuration. '*' in
subpackage_name is handled as a wildcard.
subpackage_path : str
If None, then the path is assumed to be the local path plus the
subpackage_name. If a setup.py file is not found in the
subpackage_path, then a default configuration is used.
parent_name : str
Parent name.
"""
if subpackage_name is None:
if subpackage_path is None:
raise ValueError(
"either subpackage_name or subpackage_path must be specified")
subpackage_name = os.path.basename(subpackage_path)
# handle wildcards
l = subpackage_name.split('.')
if subpackage_path is None and '*' in subpackage_name:
return self._wildcard_get_subpackage(subpackage_name,
parent_name,
caller_level = caller_level+1)
assert '*' not in subpackage_name, repr((subpackage_name, subpackage_path, parent_name))
if subpackage_path is None:
subpackage_path = njoin([self.local_path] + l)
else:
subpackage_path = njoin([subpackage_path] + l[:-1])
subpackage_path = self.paths([subpackage_path])[0]
setup_py = njoin(subpackage_path, self.setup_name)
if not self.options['ignore_setup_xxx_py']:
if not os.path.isfile(setup_py):
setup_py = njoin(subpackage_path,
'setup_%s.py' % (subpackage_name))
if not os.path.isfile(setup_py):
if not self.options['assume_default_configuration']:
self.warn('Assuming default configuration '\
'(%s/{setup_%s,setup}.py was not found)' \
% (os.path.dirname(setup_py), subpackage_name))
config = Configuration(subpackage_name, parent_name,
self.top_path, subpackage_path,
caller_level = caller_level+1)
else:
config = self._get_configuration_from_setup_py(
setup_py,
subpackage_name,
subpackage_path,
parent_name,
caller_level = caller_level + 1)
if config:
return [config]
else:
return []
def add_subpackage(self,subpackage_name,
subpackage_path=None,
standalone = False):
"""Add a sub-package to the current Configuration instance.
This is useful in a setup.py script for adding sub-packages to a
package.
Parameters
----------
subpackage_name : str
name of the subpackage
subpackage_path : str
if given, the subpackage path such that the subpackage is in
subpackage_path / subpackage_name. If None, the subpackage is
assumed to be located in the local path / subpackage_name.
standalone : bool
"""
if standalone:
parent_name = None
else:
parent_name = self.name
config_list = self.get_subpackage(subpackage_name, subpackage_path,
parent_name = parent_name,
caller_level = 2)
if not config_list:
self.warn('No configuration returned, assuming unavailable.')
for config in config_list:
d = config
if isinstance(config, Configuration):
d = config.todict()
assert isinstance(d, dict), repr(type(d))
self.info('Appending %s configuration to %s' \
% (d.get('name'), self.name))
self.dict_append(**d)
dist = self.get_distribution()
if dist is not None:
self.warn('distutils distribution has been initialized,'\
' it may be too late to add a subpackage '+ subpackage_name)
def add_data_dir(self, data_path):
"""Recursively add files under data_path to data_files list.
Recursively add files under data_path to the list of data_files to be
installed (and distributed). The data_path can be either a relative
path-name, or an absolute path-name, or a 2-tuple where the first
argument shows where in the install directory the data directory
should be installed to.
Parameters
----------
data_path : seq or str
Argument can be either
* 2-sequence (<datadir suffix>, <path to data directory>)
* path to data directory where python datadir suffix defaults
to package dir.
Notes
-----
Rules for installation paths:
foo/bar -> (foo/bar, foo/bar) -> parent/foo/bar
(gun, foo/bar) -> parent/gun
foo/* -> (foo/a, foo/a), (foo/b, foo/b) -> parent/foo/a, parent/foo/b
(gun, foo/*) -> (gun, foo/a), (gun, foo/b) -> gun
(gun/*, foo/*) -> parent/gun/a, parent/gun/b
/foo/bar -> (bar, /foo/bar) -> parent/bar
(gun, /foo/bar) -> parent/gun
(fun/*/gun/*, sun/foo/bar) -> parent/fun/foo/gun/bar
Examples
--------
For example suppose the source directory contains fun/foo.dat and
fun/bar/car.dat::
>>> self.add_data_dir('fun') #doctest: +SKIP
>>> self.add_data_dir(('sun', 'fun')) #doctest: +SKIP
>>> self.add_data_dir(('gun', '/full/path/to/fun'))#doctest: +SKIP
Will install data-files to the locations::
<package install directory>/
fun/
foo.dat
bar/
car.dat
sun/
foo.dat
bar/
car.dat
gun/
foo.dat
car.dat
"""
if is_sequence(data_path):
d, data_path = data_path
else:
d = None
if is_sequence(data_path):
[self.add_data_dir((d, p)) for p in data_path]
return
if not is_string(data_path):
raise TypeError("not a string: %r" % (data_path,))
if d is None:
if os.path.isabs(data_path):
return self.add_data_dir((os.path.basename(data_path), data_path))
return self.add_data_dir((data_path, data_path))
paths = self.paths(data_path, include_non_existing=False)
if is_glob_pattern(data_path):
if is_glob_pattern(d):
pattern_list = allpath(d).split(os.sep)
pattern_list.reverse()
# /a/*//b/ -> /a/*/b
rl = list(range(len(pattern_list)-1)); rl.reverse()
for i in rl:
if not pattern_list[i]:
del pattern_list[i]
#
for path in paths:
if not os.path.isdir(path):
print('Not a directory, skipping', path)
continue
rpath = rel_path(path, self.local_path)
path_list = rpath.split(os.sep)
path_list.reverse()
target_list = []
i = 0
for s in pattern_list:
if is_glob_pattern(s):
if i>=len(path_list):
raise ValueError('cannot fill pattern %r with %r' \
% (d, path))
target_list.append(path_list[i])
else:
assert s==path_list[i], repr((s, path_list[i], data_path, d, path, rpath))
target_list.append(s)
i += 1
if path_list[i:]:
self.warn('mismatch of pattern_list=%s and path_list=%s'\
% (pattern_list, path_list))
target_list.reverse()
self.add_data_dir((os.sep.join(target_list), path))
else:
for path in paths:
self.add_data_dir((d, path))
return
assert not is_glob_pattern(d), repr(d)
dist = self.get_distribution()
if dist is not None and dist.data_files is not None:
data_files = dist.data_files
else:
data_files = self.data_files
for path in paths:
for d1, f in list(general_source_directories_files(path)):
target_path = os.path.join(self.path_in_package, d, d1)
data_files.append((target_path, f))
def _optimize_data_files(self):
data_dict = {}
for p, files in self.data_files:
if p not in data_dict:
data_dict[p] = set()
for f in files:
data_dict[p].add(f)
self.data_files[:] = [(p, list(files)) for p, files in data_dict.items()]
def add_data_files(self,*files):
"""Add data files to configuration data_files.
Parameters
----------
files : sequence
Argument(s) can be either
* 2-sequence (<datadir prefix>,<path to data file(s)>)
* paths to data files where python datadir prefix defaults
to package dir.
Notes
-----
The form of each element of the files sequence is very flexible
allowing many combinations of where to get the files from the package
and where they should ultimately be installed on the system. The most
basic usage is for an element of the files argument sequence to be a
simple filename. This will cause that file from the local path to be
installed to the installation path of the self.name package (package
path). The file argument can also be a relative path in which case the
entire relative path will be installed into the package directory.
Finally, the file can be an absolute path name in which case the file
will be found at the absolute path name but installed to the package
path.
This basic behavior can be augmented by passing a 2-tuple in as the
file argument. The first element of the tuple should specify the
relative path (under the package install directory) where the
remaining sequence of files should be installed to (it has nothing to
do with the file-names in the source distribution). The second element
of the tuple is the sequence of files that should be installed. The
files in this sequence can be filenames, relative paths, or absolute
paths. For absolute paths the file will be installed in the top-level
package installation directory (regardless of the first argument).
Filenames and relative path names will be installed in the package
install directory under the path name given as the first element of
the tuple.
Rules for installation paths:
#. file.txt -> (., file.txt)-> parent/file.txt
#. foo/file.txt -> (foo, foo/file.txt) -> parent/foo/file.txt
#. /foo/bar/file.txt -> (., /foo/bar/file.txt) -> parent/file.txt
#. *.txt -> parent/a.txt, parent/b.txt
#. foo/*.txt -> parent/foo/a.txt, parent/foo/b.txt
#. */*.txt -> (*, */*.txt) -> parent/c/a.txt, parent/d/b.txt
#. (sun, file.txt) -> parent/sun/file.txt
#. (sun, bar/file.txt) -> parent/sun/file.txt
#. (sun, /foo/bar/file.txt) -> parent/sun/file.txt
#. (sun, *.txt) -> parent/sun/a.txt, parent/sun/b.txt
#. (sun, bar/*.txt) -> parent/sun/a.txt, parent/sun/b.txt
#. (sun/*, */*.txt) -> parent/sun/c/a.txt, parent/d/b.txt
An additional feature is that the path to a data-file can actually be
a function that takes no arguments and returns the actual path(s) to
the data-files. This is useful when the data files are generated while
building the package.
Examples
--------
Add files to the list of data_files to be included with the package.
>>> self.add_data_files('foo.dat',
... ('fun', ['gun.dat', 'nun/pun.dat', '/tmp/sun.dat']),
... 'bar/cat.dat',
... '/full/path/to/can.dat') #doctest: +SKIP
will install these data files to::
<package install directory>/
foo.dat
fun/
gun.dat
nun/
pun.dat
sun.dat
bar/
cat.dat
can.dat
where <package install directory> is the package (or sub-package)
directory such as '/usr/lib/python2.4/site-packages/mypackage'
('C:\\Python2.4\\Lib\\site-packages\\mypackage') or
'/usr/lib/python2.4/site-packages/mypackage/mysubpackage'
('C:\\Python2.4\\Lib\\site-packages\\mypackage\\mysubpackage').
"""
if len(files)>1:
for f in files:
self.add_data_files(f)
return
assert len(files)==1
if is_sequence(files[0]):
d, files = files[0]
else:
d = None
if is_string(files):
filepat = files
elif is_sequence(files):
if len(files)==1:
filepat = files[0]
else:
for f in files:
self.add_data_files((d, f))
return
else:
raise TypeError(repr(type(files)))
if d is None:
if hasattr(filepat, '__call__'):
d = ''
elif os.path.isabs(filepat):
d = ''
else:
d = os.path.dirname(filepat)
self.add_data_files((d, files))
return
paths = self.paths(filepat, include_non_existing=False)
if is_glob_pattern(filepat):
if is_glob_pattern(d):
pattern_list = d.split(os.sep)
pattern_list.reverse()
for path in paths:
path_list = path.split(os.sep)
path_list.reverse()
path_list.pop() # filename
target_list = []
i = 0
for s in pattern_list:
if is_glob_pattern(s):
target_list.append(path_list[i])
i += 1
else:
target_list.append(s)
target_list.reverse()
self.add_data_files((os.sep.join(target_list), path))
else:
self.add_data_files((d, paths))
return
assert not is_glob_pattern(d), repr((d, filepat))
dist = self.get_distribution()
if dist is not None and dist.data_files is not None:
data_files = dist.data_files
else:
data_files = self.data_files
data_files.append((os.path.join(self.path_in_package, d), paths))
### XXX Implement add_py_modules
def add_define_macros(self, macros):
"""Add define macros to configuration
Add the given sequence of (macro name, value) tuples to the beginning
of the define_macros list. This list will be visible to all extension
modules of the current package.
"""
dist = self.get_distribution()
if dist is not None:
if not hasattr(dist, 'define_macros'):
dist.define_macros = []
dist.define_macros.extend(macros)
else:
self.define_macros.extend(macros)
def add_include_dirs(self,*paths):
"""Add paths to configuration include directories.
Add the given sequence of paths to the beginning of the include_dirs
list. This list will be visible to all extension modules of the
current package.
"""
include_dirs = self.paths(paths)
dist = self.get_distribution()
if dist is not None:
if dist.include_dirs is None:
dist.include_dirs = []
dist.include_dirs.extend(include_dirs)
else:
self.include_dirs.extend(include_dirs)
def add_headers(self,*files):
"""Add installable headers to configuration.
Add the given sequence of files to the beginning of the headers list.
By default, headers will be installed under the
<python-include>/<self.name.replace('.','/')>/ directory. If an item of files
is a tuple, then its first argument specifies the actual installation
location relative to the <python-include> path.
Parameters
----------
files : str or seq
Argument(s) can be either:
* 2-sequence (<includedir suffix>,<path to header file(s)>)
* path(s) to header file(s) where python includedir suffix will
default to package name.
"""
headers = []
for path in files:
if is_string(path):
[headers.append((self.name, p)) for p in self.paths(path)]
else:
if not isinstance(path, (tuple, list)) or len(path) != 2:
raise TypeError(repr(path))
[headers.append((path[0], p)) for p in self.paths(path[1])]
dist = self.get_distribution()
if dist is not None:
if dist.headers is None:
dist.headers = []
dist.headers.extend(headers)
else:
self.headers.extend(headers)
def paths(self,*paths,**kws):
"""Apply glob to paths and prepend local_path if needed.
Applies glob.glob(...) to each path in the sequence (if needed) and
pre-pends the local_path if needed. Because this is called on all
source lists, this allows wildcard characters to be specified in lists
of sources for extension modules, libraries, and scripts, and allows
path-names to be relative to the source directory.
"""
include_non_existing = kws.get('include_non_existing', True)
return gpaths(paths,
local_path = self.local_path,
include_non_existing=include_non_existing)
def _fix_paths_dict(self, kw):
for k in kw.keys():
v = kw[k]
if k in ['sources', 'depends', 'include_dirs', 'library_dirs',
'module_dirs', 'extra_objects']:
new_v = self.paths(v)
kw[k] = new_v
def add_extension(self,name,sources,**kw):
"""Add extension to configuration.
Create and add an Extension instance to the ext_modules list. This
method also takes the following optional keyword arguments that are
passed on to the Extension constructor.
Parameters
----------
name : str
name of the extension
sources : seq
list of the sources. The list of sources may contain functions
(called source generators) which must take an extension instance
and a build directory as inputs and return a source file or list of
source files or None. If None is returned then no sources are
generated. If the Extension instance has no sources after
processing all source generators, then no extension module is
built.
include_dirs :
define_macros :
undef_macros :
library_dirs :
libraries :
runtime_library_dirs :
extra_objects :
extra_compile_args :
extra_link_args :
extra_f77_compile_args :
extra_f90_compile_args :
export_symbols :
swig_opts :
depends :
The depends list contains paths to files or directories that the
sources of the extension module depend on. If any path in the
depends list is newer than the extension module, then the module
will be rebuilt.
language :
f2py_options :
module_dirs :
extra_info : dict or list
dict or list of dict of keywords to be appended to keywords.
Notes
-----
The self.paths(...) method is applied to all lists that may contain
paths.
"""
ext_args = copy.copy(kw)
ext_args['name'] = dot_join(self.name, name)
ext_args['sources'] = sources
if 'extra_info' in ext_args:
extra_info = ext_args['extra_info']
del ext_args['extra_info']
if isinstance(extra_info, dict):
extra_info = [extra_info]
for info in extra_info:
assert isinstance(info, dict), repr(info)
dict_append(ext_args,**info)
self._fix_paths_dict(ext_args)
# Resolve out-of-tree dependencies
libraries = ext_args.get('libraries', [])
libnames = []
ext_args['libraries'] = []
for libname in libraries:
if isinstance(libname, tuple):
self._fix_paths_dict(libname[1])
# Handle library names of the form libname@relative/path/to/library
if '@' in libname:
lname, lpath = libname.split('@', 1)
lpath = os.path.abspath(njoin(self.local_path, lpath))
if os.path.isdir(lpath):
c = self.get_subpackage(None, lpath,
caller_level = 2)
if isinstance(c, Configuration):
c = c.todict()
for l in [l[0] for l in c.get('libraries', [])]:
llname = l.split('__OF__', 1)[0]
if llname == lname:
c.pop('name', None)
dict_append(ext_args,**c)
break
continue
libnames.append(libname)
ext_args['libraries'] = libnames + ext_args['libraries']
ext_args['define_macros'] = \
self.define_macros + ext_args.get('define_macros', [])
from numpy.distutils.core import Extension
ext = Extension(**ext_args)
self.ext_modules.append(ext)
dist = self.get_distribution()
if dist is not None:
self.warn('distutils distribution has been initialized,'\
' it may be too late to add an extension '+name)
return ext
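    # Illustrative sketch only (file names are hypothetical): an extension built
    # from a C source plus a source generator, reusing the npymath flags via the
    # extra_info keyword (get_info as defined later in this module).
    #
    #     def generate_wrapper(ext, build_dir):
    #         # may return a generated source path, a list of paths, or None
    #         return None
    #
    #     config.add_extension('_fastops',
    #                          sources=['src/fastops.c', generate_wrapper],
    #                          include_dirs=['src'],
    #                          extra_info=get_info('npymath'))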
def add_library(self,name,sources,**build_info):
"""
Add library to configuration.
Parameters
----------
name : str
Name of the extension.
sources : sequence
List of the sources. The list of sources may contain functions
(called source generators) which must take an extension instance
and a build directory as inputs and return a source file or list of
source files or None. If None is returned then no sources are
generated. If the Extension instance has no sources after
processing all source generators, then no extension module is
built.
build_info : dict, optional
The following keys are allowed:
* depends
* macros
* include_dirs
* extra_compiler_args
* extra_f77_compiler_args
* extra_f90_compiler_args
* f2py_options
* language
"""
self._add_library(name, sources, None, build_info)
dist = self.get_distribution()
if dist is not None:
self.warn('distutils distribution has been initialized,'\
' it may be too late to add a library '+ name)
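    # Sketch (names are made up): a static helper library that extensions in the
    # same configuration can link against through their 'libraries' keyword.
    #
    #     config.add_library('helpers',
    #                        sources=['src/helpers.c'],
    #                        include_dirs=['src'],
    #                        macros=[('HELPERS_BUILD', None)])
    #     config.add_extension('_wrap', sources=['src/wrap.c'],
    #                          libraries=['helpers'])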
def _add_library(self, name, sources, install_dir, build_info):
"""Common implementation for add_library and add_installed_library. Do
not use directly"""
build_info = copy.copy(build_info)
name = name #+ '__OF__' + self.name
build_info['sources'] = sources
# Sometimes, depends is not set up to an empty list by default, and if
# depends is not given to add_library, distutils barfs (#1134)
if not 'depends' in build_info:
build_info['depends'] = []
self._fix_paths_dict(build_info)
# Add to libraries list so that it is build with build_clib
self.libraries.append((name, build_info))
def add_installed_library(self, name, sources, install_dir, build_info=None):
"""
Similar to add_library, but the specified library is installed.
Most C libraries used with `distutils` are only used to build python
extensions, but libraries built through this method will be installed
so that they can be reused by third-party packages.
Parameters
----------
name : str
Name of the installed library.
sources : sequence
List of the library's source files. See `add_library` for details.
install_dir : str
Path to install the library, relative to the current sub-package.
build_info : dict, optional
The following keys are allowed:
* depends
* macros
* include_dirs
* extra_compiler_args
* extra_f77_compiler_args
* extra_f90_compiler_args
* f2py_options
* language
Returns
-------
None
See Also
--------
add_library, add_npy_pkg_config, get_info
Notes
-----
The best way to encode the options required to link against the specified
C libraries is to use a "libname.ini" file, and use `get_info` to
retrieve the required options (see `add_npy_pkg_config` for more
information).
"""
if not build_info:
build_info = {}
install_dir = os.path.join(self.package_path, install_dir)
self._add_library(name, sources, install_dir, build_info)
self.installed_libraries.append(InstallableLib(name, build_info, install_dir))
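    # Sketch of the reusable-library workflow (all names hypothetical): install
    # the built library under the package and describe it with an .ini template
    # so that other packages can recover the link options through get_info().
    #
    #     config.add_installed_library('mylib',
    #                                  sources=['src/mylib.c'],
    #                                  install_dir='lib')
    #     config.add_npy_pkg_config('mylib.ini.in', 'lib', {'foo': 'bar'})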
def add_npy_pkg_config(self, template, install_dir, subst_dict=None):
"""
Generate and install a npy-pkg config file from a template.
The config file generated from `template` is installed in the
given install directory, using `subst_dict` for variable substitution.
Parameters
----------
template : str
The path of the template, relatively to the current package path.
install_dir : str
Where to install the npy-pkg config file, relatively to the current
package path.
subst_dict : dict, optional
If given, any string of the form ``@key@`` will be replaced by
``subst_dict[key]`` in the template file when installed. The install
prefix is always available through the variable ``@prefix@``, since the
install prefix is not easy to get reliably from setup.py.
See also
--------
add_installed_library, get_info
Notes
-----
This works for both standard installs and in-place builds, i.e. the
``@prefix@`` refer to the source directory for in-place builds.
Examples
--------
::
config.add_npy_pkg_config('foo.ini.in', 'lib', {'foo': bar})
Assuming the foo.ini.in file has the following content::
[meta]
Name=@foo@
Version=1.0
Description=dummy description
[default]
Cflags=-I@prefix@/include
Libs=
The generated file will have the following content::
[meta]
Name=bar
Version=1.0
Description=dummy description
[default]
Cflags=-Iprefix_dir/include
Libs=
and will be installed as foo.ini in the 'lib' subpath.
"""
if subst_dict is None:
subst_dict = {}
basename = os.path.splitext(template)[0]
template = os.path.join(self.package_path, template)
if self.name in self.installed_pkg_config:
self.installed_pkg_config[self.name].append((template, install_dir,
subst_dict))
else:
self.installed_pkg_config[self.name] = [(template, install_dir,
subst_dict)]
def add_scripts(self,*files):
"""Add scripts to configuration.
Add the sequence of files to the beginning of the scripts list.
Scripts will be installed under the <prefix>/bin/ directory.
"""
scripts = self.paths(files)
dist = self.get_distribution()
if dist is not None:
if dist.scripts is None:
dist.scripts = []
dist.scripts.extend(scripts)
else:
self.scripts.extend(scripts)
def dict_append(self,**dict):
for key in self.list_keys:
a = getattr(self, key)
a.extend(dict.get(key, []))
for key in self.dict_keys:
a = getattr(self, key)
a.update(dict.get(key, {}))
known_keys = self.list_keys + self.dict_keys + self.extra_keys
for key in dict.keys():
if key not in known_keys:
a = getattr(self, key, None)
if a and a==dict[key]: continue
self.warn('Inheriting attribute %r=%r from %r' \
% (key, dict[key], dict.get('name', '?')))
setattr(self, key, dict[key])
self.extra_keys.append(key)
elif key in self.extra_keys:
self.info('Ignoring attempt to set %r (from %r to %r)' \
% (key, getattr(self, key), dict[key]))
elif key in known_keys:
# key is already processed above
pass
else:
raise ValueError("Don't know about key=%r" % (key))
def __str__(self):
from pprint import pformat
known_keys = self.list_keys + self.dict_keys + self.extra_keys
s = '<'+5*'-' + '\n'
s += 'Configuration of '+self.name+':\n'
known_keys.sort()
for k in known_keys:
a = getattr(self, k, None)
if a:
s += '%s = %s\n' % (k, pformat(a))
s += 5*'-' + '>'
return s
def get_config_cmd(self):
"""
Returns the numpy.distutils config command instance.
"""
cmd = get_cmd('config')
cmd.ensure_finalized()
cmd.dump_source = 0
cmd.noisy = 0
old_path = os.environ.get('PATH')
if old_path:
path = os.pathsep.join(['.', old_path])
os.environ['PATH'] = path
return cmd
def get_build_temp_dir(self):
"""
Return a path to a temporary directory where temporary files should be
placed.
"""
cmd = get_cmd('build')
cmd.ensure_finalized()
return cmd.build_temp
def have_f77c(self):
"""Check for availability of Fortran 77 compiler.
Use it inside source generating function to ensure that
setup distribution instance has been initialized.
Notes
-----
True if a Fortran 77 compiler is available (because a simple Fortran 77
code was able to be compiled successfully).
"""
simple_fortran_subroutine = '''
subroutine simple
end
'''
config_cmd = self.get_config_cmd()
flag = config_cmd.try_compile(simple_fortran_subroutine, lang='f77')
return flag
def have_f90c(self):
"""Check for availability of Fortran 90 compiler.
Use it inside source generating function to ensure that
setup distribution instance has been initialized.
Notes
-----
True if a Fortran 90 compiler is available (because a simple Fortran
90 code was able to be compiled successfully)
"""
simple_fortran_subroutine = '''
subroutine simple
end
'''
config_cmd = self.get_config_cmd()
flag = config_cmd.try_compile(simple_fortran_subroutine, lang='f90')
return flag
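    # Hedged sketch (file names are hypothetical): call have_f77c()/have_f90c()
    # from inside a source generator so the compiler check only runs after the
    # distribution instance has been set up.
    #
    #     def maybe_f77_sources(ext, build_dir):
    #         if config.have_f77c():
    #             return ['src/legacy_helpers.f']
    #         return None
    #
    #     config.add_extension('_maybe_f77',
    #                          sources=['src/stub.c', maybe_f77_sources])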
def append_to(self, extlib):
"""Append libraries, include_dirs to extension or library item.
"""
if is_sequence(extlib):
lib_name, build_info = extlib
dict_append(build_info,
libraries=self.libraries,
include_dirs=self.include_dirs)
else:
from numpy.distutils.core import Extension
assert isinstance(extlib, Extension), repr(extlib)
extlib.libraries.extend(self.libraries)
extlib.include_dirs.extend(self.include_dirs)
def _get_svn_revision(self, path):
"""Return path's SVN revision number.
"""
revision = None
m = None
cwd = os.getcwd()
try:
os.chdir(path or '.')
p = subprocess.Popen(['svnversion'], shell=True,
stdout=subprocess.PIPE, stderr=None,
close_fds=True)
sout = p.stdout
m = re.match(r'(?P<revision>\d+)', sout.read())
except:
pass
os.chdir(cwd)
if m:
revision = int(m.group('revision'))
return revision
if sys.platform=='win32' and os.environ.get('SVN_ASP_DOT_NET_HACK', None):
entries = njoin(path, '_svn', 'entries')
else:
entries = njoin(path, '.svn', 'entries')
if os.path.isfile(entries):
f = open(entries)
fstr = f.read()
f.close()
if fstr[:5] == '<?xml': # pre 1.4
m = re.search(r'revision="(?P<revision>\d+)"', fstr)
if m:
revision = int(m.group('revision'))
else: # non-xml entries file --- check to be sure that
m = re.search(r'dir[\n\r]+(?P<revision>\d+)', fstr)
if m:
revision = int(m.group('revision'))
return revision
def _get_hg_revision(self, path):
"""Return path's Mercurial revision number.
"""
revision = None
m = None
cwd = os.getcwd()
try:
os.chdir(path or '.')
p = subprocess.Popen(['hg identify --num'], shell=True,
stdout=subprocess.PIPE, stderr=None,
close_fds=True)
sout = p.stdout
m = re.match(r'(?P<revision>\d+)', sout.read())
except:
pass
os.chdir(cwd)
if m:
revision = int(m.group('revision'))
return revision
branch_fn = njoin(path, '.hg', 'branch')
branch_cache_fn = njoin(path, '.hg', 'branch.cache')
if os.path.isfile(branch_fn):
branch0 = None
f = open(branch_fn)
revision0 = f.read().strip()
f.close()
branch_map = {}
            for line in open(branch_cache_fn, 'r'):
branch1, revision1 = line.split()[:2]
if revision1==revision0:
branch0 = branch1
try:
revision1 = int(revision1)
except ValueError:
continue
branch_map[branch1] = revision1
revision = branch_map.get(branch0)
return revision
def get_version(self, version_file=None, version_variable=None):
"""Try to get version string of a package.
Return a version string of the current package or None if the version
information could not be detected.
Notes
-----
        This method scans files named
        __version__.py, <packagename>_version.py, version.py,
        __svn_version__.py, and __hg_version__.py for string variables
        version, __version__, and <packagename>_version, until a version
        number is found.
"""
version = getattr(self, 'version', None)
if version is not None:
return version
# Get version from version file.
if version_file is None:
files = ['__version__.py',
self.name.split('.')[-1]+'_version.py',
'version.py',
'__svn_version__.py',
'__hg_version__.py']
else:
files = [version_file]
if version_variable is None:
version_vars = ['version',
'__version__',
self.name.split('.')[-1]+'_version']
else:
version_vars = [version_variable]
for f in files:
fn = njoin(self.local_path, f)
if os.path.isfile(fn):
info = ('.py', 'U', 1)
name = os.path.splitext(os.path.basename(fn))[0]
n = dot_join(self.name, name)
try:
version_module = npy_load_module('_'.join(n.split('.')),
fn, info)
except ImportError:
msg = get_exception()
self.warn(str(msg))
version_module = None
if version_module is None:
continue
for a in version_vars:
version = getattr(version_module, a, None)
if version is not None:
break
if version is not None:
break
if version is not None:
self.version = version
return version
# Get version as SVN or Mercurial revision number
revision = self._get_svn_revision(self.local_path)
if revision is None:
revision = self._get_hg_revision(self.local_path)
if revision is not None:
version = str(revision)
self.version = version
return version
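    # Sketch of typical setup.py usage (package name is hypothetical); the files
    # and variable names searched are the defaults listed in the docstring.
    #
    #     from numpy.distutils.core import setup
    #     config = Configuration('mypkg')
    #     version = config.get_version()      # e.g. taken from mypkg/version.py
    #     setup(version=version, **config.todict())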
def make_svn_version_py(self, delete=True):
"""Appends a data function to the data_files list that will generate
__svn_version__.py file to the current package directory.
Generate package __svn_version__.py file from SVN revision number,
it will be removed after python exits but will be available
when sdist, etc commands are executed.
Notes
-----
If __svn_version__.py existed before, nothing is done.
This is
intended for working with source directories that are in an SVN
repository.
"""
target = njoin(self.local_path, '__svn_version__.py')
revision = self._get_svn_revision(self.local_path)
if os.path.isfile(target) or revision is None:
return
else:
def generate_svn_version_py():
if not os.path.isfile(target):
version = str(revision)
self.info('Creating %s (version=%r)' % (target, version))
f = open(target, 'w')
f.write('version = %r\n' % (version))
f.close()
import atexit
def rm_file(f=target,p=self.info):
if delete:
try: os.remove(f); p('removed '+f)
except OSError: pass
try: os.remove(f+'c'); p('removed '+f+'c')
except OSError: pass
atexit.register(rm_file)
return target
self.add_data_files(('', generate_svn_version_py()))
def make_hg_version_py(self, delete=True):
"""Appends a data function to the data_files list that will generate
__hg_version__.py file to the current package directory.
Generate package __hg_version__.py file from Mercurial revision,
it will be removed after python exits but will be available
when sdist, etc commands are executed.
Notes
-----
If __hg_version__.py existed before, nothing is done.
This is intended for working with source directories that are
        in a Mercurial repository.
"""
target = njoin(self.local_path, '__hg_version__.py')
revision = self._get_hg_revision(self.local_path)
if os.path.isfile(target) or revision is None:
return
else:
def generate_hg_version_py():
if not os.path.isfile(target):
version = str(revision)
self.info('Creating %s (version=%r)' % (target, version))
f = open(target, 'w')
f.write('version = %r\n' % (version))
f.close()
import atexit
def rm_file(f=target,p=self.info):
if delete:
try: os.remove(f); p('removed '+f)
except OSError: pass
try: os.remove(f+'c'); p('removed '+f+'c')
except OSError: pass
atexit.register(rm_file)
return target
self.add_data_files(('', generate_hg_version_py()))
def make_config_py(self,name='__config__'):
"""Generate package __config__.py file containing system_info
information used during building the package.
This file is installed to the
package installation directory.
"""
self.py_modules.append((self.name, name, generate_config_py))
def get_info(self,*names):
"""Get resources information.
Return information (from system_info.get_info) for all of the names in
the argument list in a single dictionary.
"""
from .system_info import get_info, dict_append
info_dict = {}
for a in names:
dict_append(info_dict,**get_info(a))
return info_dict
def get_cmd(cmdname, _cache={}):
if cmdname not in _cache:
import distutils.core
dist = distutils.core._setup_distribution
if dist is None:
from distutils.errors import DistutilsInternalError
raise DistutilsInternalError(
'setup distribution instance not initialized')
cmd = dist.get_command_obj(cmdname)
_cache[cmdname] = cmd
return _cache[cmdname]
def get_numpy_include_dirs():
# numpy_include_dirs are set by numpy/core/setup.py, otherwise []
include_dirs = Configuration.numpy_include_dirs[:]
if not include_dirs:
import numpy
include_dirs = [ numpy.get_include() ]
# else running numpy/core/setup.py
return include_dirs
def get_npy_pkg_dir():
"""Return the path where to find the npy-pkg-config directory."""
# XXX: import here for bootstrapping reasons
import numpy
d = os.path.join(os.path.dirname(numpy.__file__),
'core', 'lib', 'npy-pkg-config')
return d
def get_pkg_info(pkgname, dirs=None):
"""
Return library info for the given package.
Parameters
----------
pkgname : str
Name of the package (should match the name of the .ini file, without
the extension, e.g. foo for the file foo.ini).
dirs : sequence, optional
If given, should be a sequence of additional directories where to look
for npy-pkg-config files. Those directories are searched prior to the
NumPy directory.
Returns
-------
pkginfo : class instance
The `LibraryInfo` instance containing the build information.
Raises
------
PkgNotFound
If the package is not found.
See Also
--------
Configuration.add_npy_pkg_config, Configuration.add_installed_library,
get_info
"""
from numpy.distutils.npy_pkg_config import read_config
if dirs:
dirs.append(get_npy_pkg_dir())
else:
dirs = [get_npy_pkg_dir()]
return read_config(pkgname, dirs)
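# Usage sketch for get_pkg_info: the returned LibraryInfo exposes the raw
# compile and link flags (the 'npymath' entry ships with NumPy itself).
#
#     info = get_pkg_info('npymath')
#     compile_flags = info.cflags()
#     link_flags = info.libs()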
def get_info(pkgname, dirs=None):
"""
Return an info dict for a given C library.
The info dict contains the necessary options to use the C library.
Parameters
----------
pkgname : str
Name of the package (should match the name of the .ini file, without
the extension, e.g. foo for the file foo.ini).
dirs : sequence, optional
If given, should be a sequence of additional directories where to look
for npy-pkg-config files. Those directories are searched prior to the
NumPy directory.
Returns
-------
info : dict
The dictionary with build information.
Raises
------
PkgNotFound
If the package is not found.
See Also
--------
Configuration.add_npy_pkg_config, Configuration.add_installed_library,
get_pkg_info
Examples
--------
To get the necessary information for the npymath library from NumPy:
>>> npymath_info = np.distutils.misc_util.get_info('npymath')
>>> npymath_info #doctest: +SKIP
{'define_macros': [], 'libraries': ['npymath'], 'library_dirs':
['.../numpy/core/lib'], 'include_dirs': ['.../numpy/core/include']}
This info dict can then be used as input to a `Configuration` instance::
config.add_extension('foo', sources=['foo.c'], extra_info=npymath_info)
"""
from numpy.distutils.npy_pkg_config import parse_flags
pkg_info = get_pkg_info(pkgname, dirs)
# Translate LibraryInfo instance into a build_info dict
info = parse_flags(pkg_info.cflags())
for k, v in parse_flags(pkg_info.libs()).items():
info[k].extend(v)
    # add_extension's extra_info argument expects 'define_macros' rather than
    # 'macros', and has no use for the 'ignored' key, so adjust accordingly.
info['define_macros'] = info['macros']
del info['macros']
del info['ignored']
return info
def is_bootstrapping():
if sys.version_info[0] >= 3:
import builtins
else:
import __builtin__ as builtins
try:
builtins.__NUMPY_SETUP__
return True
except AttributeError:
return False
__NUMPY_SETUP__ = False
#########################
def default_config_dict(name = None, parent_name = None, local_path=None):
"""Return a configuration dictionary for usage in
configuration() function defined in file setup_<name>.py.
"""
import warnings
warnings.warn('Use Configuration(%r,%r,top_path=%r) instead of '\
'deprecated default_config_dict(%r,%r,%r)'
% (name, parent_name, local_path,
name, parent_name, local_path,
))
c = Configuration(name, parent_name, local_path)
return c.todict()
def dict_append(d, **kws):
for k, v in kws.items():
if k in d:
ov = d[k]
if isinstance(ov, str):
d[k] = v
else:
d[k].extend(v)
else:
d[k] = v
def appendpath(prefix, path):
if os.path.sep != '/':
prefix = prefix.replace('/', os.path.sep)
path = path.replace('/', os.path.sep)
drive = ''
if os.path.isabs(path):
drive = os.path.splitdrive(prefix)[0]
absprefix = os.path.splitdrive(os.path.abspath(prefix))[1]
pathdrive, path = os.path.splitdrive(path)
d = os.path.commonprefix([absprefix, path])
if os.path.join(absprefix[:len(d)], absprefix[len(d):]) != absprefix \
or os.path.join(path[:len(d)], path[len(d):]) != path:
# Handle invalid paths
d = os.path.dirname(d)
subpath = path[len(d):]
if os.path.isabs(subpath):
subpath = subpath[1:]
else:
subpath = path
return os.path.normpath(njoin(drive + prefix, subpath))
def generate_config_py(target):
"""Generate config.py file containing system_info information
used during building the package.
Usage:
config['py_modules'].append((packagename, '__config__',generate_config_py))
"""
from numpy.distutils.system_info import system_info
from distutils.dir_util import mkpath
mkpath(os.path.dirname(target))
f = open(target, 'w')
f.write('# This file is generated by %s\n' % (os.path.abspath(sys.argv[0])))
f.write('# It contains system_info results at the time of building this package.\n')
f.write('__all__ = ["get_info","show"]\n\n')
for k, i in system_info.saved_results.items():
f.write('%s=%r\n' % (k, i))
f.write(r'''
def get_info(name):
g = globals()
return g.get(name, g.get(name + "_info", {}))
def show():
for name,info_dict in globals().items():
if name[0] == "_" or type(info_dict) is not type({}): continue
print(name + ":")
if not info_dict:
print(" NOT AVAILABLE")
for k,v in info_dict.items():
v = str(v)
if k == "sources" and len(v) > 200:
v = v[:60] + " ...\n... " + v[-60:]
print(" %s = %s" % (k,v))
''')
f.close()
return target
def msvc_version(compiler):
"""Return version major and minor of compiler instance if it is
MSVC, raise an exception otherwise."""
if not compiler.compiler_type == "msvc":
raise ValueError("Compiler instance is not msvc (%s)"\
% compiler.compiler_type)
return compiler._MSVCCompiler__version
if sys.version[:3] >= '2.5':
def get_build_architecture():
from distutils.msvccompiler import get_build_architecture
return get_build_architecture()
else:
#copied from python 2.5.1 distutils/msvccompiler.py
def get_build_architecture():
"""Return the processor architecture.
Possible results are "Intel", "Itanium", or "AMD64".
"""
prefix = " bit ("
i = sys.version.find(prefix)
if i == -1:
return "Intel"
j = sys.version.find(")", i)
return sys.version[i+len(prefix):j]
| {
"content_hash": "b34b58190b53058f893c7dde3e26b10e",
"timestamp": "",
"source": "github",
"line_count": 2306,
"max_line_length": 102,
"avg_line_length": 35.41803989592368,
"alnum_prop": 0.54127384479761,
"repo_name": "ryfeus/lambda-packs",
"id": "0359063033829646a286462dfacc72ab27917db4",
"size": "81674",
"binary": false,
"copies": "11",
"ref": "refs/heads/master",
"path": "Keras_tensorflow/source/numpy/distutils/misc_util.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "9768343"
},
{
"name": "C++",
"bytes": "76566960"
},
{
"name": "CMake",
"bytes": "191097"
},
{
"name": "CSS",
"bytes": "153538"
},
{
"name": "Cuda",
"bytes": "61768"
},
{
"name": "Cython",
"bytes": "3110222"
},
{
"name": "Fortran",
"bytes": "110284"
},
{
"name": "HTML",
"bytes": "248658"
},
{
"name": "JavaScript",
"bytes": "62920"
},
{
"name": "MATLAB",
"bytes": "17384"
},
{
"name": "Makefile",
"bytes": "152150"
},
{
"name": "Python",
"bytes": "549307737"
},
{
"name": "Roff",
"bytes": "26398"
},
{
"name": "SWIG",
"bytes": "142"
},
{
"name": "Shell",
"bytes": "7790"
},
{
"name": "Smarty",
"bytes": "4090"
},
{
"name": "TeX",
"bytes": "152062"
},
{
"name": "XSLT",
"bytes": "305540"
}
],
"symlink_target": ""
} |
import mock
from nova import test
from nova_powervm.virt.powervm import exception as p_exc
from nova_powervm.virt.powervm import slot
from pypowervm import exceptions as pvm_exc
class TestNovaSlotManager(test.NoDBTestCase):
def setUp(self):
super(TestNovaSlotManager, self).setUp()
self.store_api = mock.MagicMock()
self.inst = mock.MagicMock(uuid='uuid1')
def test_build_slot_mgr(self):
# Test when NVRAM store exists
# The Swift-backed implementation of PowerVM SlotMapStore is returned
self.store_api.fetch_slot_map = mock.MagicMock(return_value=None)
slot_mgr = slot.build_slot_mgr(self.inst, self.store_api, adapter=None,
vol_drv_iter=None)
self.assertIsInstance(slot_mgr, slot.SwiftSlotManager)
self.assertFalse(slot_mgr.is_rebuild)
# Test when no NVRAM store is set up
# The no-op implementation of PowerVM SlotMapStore is returned
self.assertIsInstance(
slot.build_slot_mgr(self.inst, None, adapter=None,
vol_drv_iter=None),
slot.NoopSlotManager)
# Test that the rebuild flag is set when it is flagged as a rebuild
slot_mgr = slot.build_slot_mgr(
self.inst, self.store_api, adapter='adpt', vol_drv_iter='test')
self.assertTrue(slot_mgr.is_rebuild)
class TestSwiftSlotManager(test.NoDBTestCase):
def setUp(self):
super(TestSwiftSlotManager, self).setUp()
self.store_api = mock.MagicMock()
self.store_api.fetch_slot_map = mock.MagicMock(return_value=None)
self.inst = mock.MagicMock(uuid='a2e71b38-160f-4650-bbdc-2a10cd507e2b')
self.slot_mgr = slot.SwiftSlotManager(self.store_api,
instance=self.inst)
def test_load(self):
# load() should have been called internally by __init__
self.store_api.fetch_slot_map.assert_called_with(
self.inst.uuid + '_slot_map')
def test_save(self):
# Mock the call
self.store_api.store_slot_map = mock.MagicMock()
# Run save
self.slot_mgr.save()
# Not called because nothing changed
self.store_api.store_slot_map.assert_not_called()
# Change something
mock_vfcmap = mock.Mock(server_adapter=mock.Mock(lpar_slot_num=123))
self.slot_mgr.register_vfc_mapping(mock_vfcmap, 'fabric')
# Run save
self.slot_mgr.save()
# Validate the call
self.store_api.store_slot_map.assert_called_once_with(
self.inst.uuid + '_slot_map', mock.ANY)
def test_delete(self):
# Mock the call
self.store_api.delete_slot_map = mock.MagicMock()
# Run delete
self.slot_mgr.delete()
# Validate the call
self.store_api.delete_slot_map.assert_called_once_with(
self.inst.uuid + '_slot_map')
@mock.patch('pypowervm.tasks.slot_map.RebuildSlotMap', autospec=True)
@mock.patch('pypowervm.tasks.storage.ComprehensiveScrub', autospec=True)
def test_init_recreate_map(self, mock_ftsk, mock_rebuild_slot):
vios1, vios2 = mock.Mock(uuid='uuid1'), mock.Mock(uuid='uuid2')
mock_ftsk.return_value.feed = [vios1, vios2]
self.slot_mgr.init_recreate_map(mock.Mock(), self._vol_drv_iter())
self.assertEqual(1, mock_ftsk.call_count)
mock_rebuild_slot.assert_called_once_with(
self.slot_mgr, mock.ANY, {'udid': ['uuid2'], 'iscsi': ['uuid1']},
['a', 'b'])
@mock.patch('pypowervm.tasks.slot_map.RebuildSlotMap', autospec=True)
@mock.patch('pypowervm.tasks.storage.ComprehensiveScrub', autospec=True)
def test_init_recreate_map_fails(self, mock_ftsk, mock_rebuild_slot):
vios1, vios2 = mock.Mock(uuid='uuid1'), mock.Mock(uuid='uuid2')
mock_ftsk.return_value.feed = [vios1, vios2]
mock_rebuild_slot.side_effect = (
pvm_exc.InvalidHostForRebuildNotEnoughVIOS(udid='udid56'))
self.assertRaises(
p_exc.InvalidRebuild, self.slot_mgr.init_recreate_map, mock.Mock(),
self._vol_drv_iter())
@mock.patch('pypowervm.tasks.slot_map.RebuildSlotMap', autospec=True)
@mock.patch('pypowervm.tasks.storage.ComprehensiveScrub', autospec=True)
def test_init_recreate_map_fileio(self, mock_ftsk, mock_rebuild_slot):
vios1, vios2 = mock.Mock(uuid='uuid1'), mock.Mock(uuid='uuid2')
mock_ftsk.return_value.feed = [vios1, vios2]
expected_vio_wrap = [vios1, vios2]
self.slot_mgr.init_recreate_map(mock.Mock(), self._vol_drv_iter_2())
self.assertEqual(1, mock_ftsk.call_count)
mock_rebuild_slot.assert_called_once_with(
self.slot_mgr, expected_vio_wrap,
{'udidvscsi': ['uuid1'], 'udid': ['uuid1']}, [])
def _vol_drv_iter_2(self):
mock_fileio = mock.Mock()
mock_fileio.vol_type.return_value = 'fileio'
mock_fileio.is_volume_on_vios.side_effect = ((True, 'udid'),
(False, None))
mock_scsi = mock.Mock()
mock_scsi.vol_type.return_value = 'vscsi'
mock_scsi.is_volume_on_vios.side_effect = ((True, 'udidvscsi'),
(False, None))
vol_drv = [mock_fileio, mock_scsi]
for type in vol_drv:
yield mock.Mock(), type
def _vol_drv_iter(self):
mock_scsi = mock.Mock()
mock_scsi.vol_type.return_value = 'vscsi'
mock_scsi.is_volume_on_vios.side_effect = ((False, None),
(True, 'udid'))
mock_iscsi = mock.Mock()
mock_iscsi.vol_type.return_value = 'iscsi'
mock_iscsi.is_volume_on_vios.side_effect = ((True, 'iscsi'),
(False, None))
mock_npiv1 = mock.Mock()
mock_npiv1.vol_type.return_value = 'npiv'
mock_npiv1._fabric_names.return_value = ['a', 'b']
mock_npiv2 = mock.Mock()
mock_npiv2.vol_type.return_value = 'npiv'
mock_npiv2._fabric_names.return_value = ['a', 'b', 'c']
vol_drv = [mock_scsi, mock_npiv1, mock_npiv2, mock_iscsi]
for type in vol_drv:
yield mock.Mock(), type
| {
"content_hash": "2d9384c9ce66d3ec0283473e31ca5c56",
"timestamp": "",
"source": "github",
"line_count": 154,
"max_line_length": 79,
"avg_line_length": 41.14935064935065,
"alnum_prop": 0.6026510967334701,
"repo_name": "stackforge/nova-powervm",
"id": "5a2f2830cbe0219003fb0536d25ffad30e2038a4",
"size": "6972",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "nova_powervm/tests/virt/powervm/test_slot.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "716308"
},
{
"name": "Shell",
"bytes": "6293"
}
],
"symlink_target": ""
} |
from flask import request, current_app, redirect
from mediacrush.config import _cfg
from mediacrush.network import is_tor
def tor_redirect(path):
if is_tor():
return redirect(_cfg("tor_domain") + '/' + path)
return redirect(path)
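# Usage sketch (route and argument names are hypothetical): call tor_redirect()
# from a Flask view so visitors on the hidden service stay on tor_domain.
#
#     @app.route('/<hash>/direct')
#     def direct(hash):
#         return tor_redirect(hash + '/direct')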
| {
"content_hash": "054cd61ca3e46540e4131f784838e930",
"timestamp": "",
"source": "github",
"line_count": 8,
"max_line_length": 56,
"avg_line_length": 30.875,
"alnum_prop": 0.7044534412955465,
"repo_name": "roderickm/MediaCrush",
"id": "f3c1a4c944062069cb2e2dece7d36b1987323353",
"size": "247",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "mediacrush/tor.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "53145"
},
{
"name": "CoffeeScript",
"bytes": "85257"
},
{
"name": "HTML",
"bytes": "83198"
},
{
"name": "JavaScript",
"bytes": "846439"
},
{
"name": "Nginx",
"bytes": "2420"
},
{
"name": "Python",
"bytes": "110708"
}
],
"symlink_target": ""
} |
"""The Integration integration."""
from __future__ import annotations
from homeassistant.config_entries import ConfigEntry
from homeassistant.const import Platform
from homeassistant.core import HomeAssistant
async def async_setup_entry(hass: HomeAssistant, entry: ConfigEntry) -> bool:
"""Set up Integration from a config entry."""
await hass.config_entries.async_forward_entry_setups(entry, (Platform.SENSOR,))
entry.async_on_unload(entry.add_update_listener(config_entry_update_listener))
return True
async def config_entry_update_listener(hass: HomeAssistant, entry: ConfigEntry) -> None:
"""Update listener, called when the config entry options are changed."""
await hass.config_entries.async_reload(entry.entry_id)
async def async_unload_entry(hass: HomeAssistant, entry: ConfigEntry) -> bool:
"""Unload a config entry."""
return await hass.config_entries.async_unload_platforms(entry, (Platform.SENSOR,))
| {
"content_hash": "534daf6cd31a41ca2040d042bebdf211",
"timestamp": "",
"source": "github",
"line_count": 23,
"max_line_length": 88,
"avg_line_length": 41.34782608695652,
"alnum_prop": 0.7592008412197687,
"repo_name": "nkgilley/home-assistant",
"id": "f482f4e41e87790b121fffc6179f78b472cf850b",
"size": "951",
"binary": false,
"copies": "3",
"ref": "refs/heads/dev",
"path": "homeassistant/components/integration/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "2963"
},
{
"name": "PLSQL",
"bytes": "840"
},
{
"name": "Python",
"bytes": "51597279"
},
{
"name": "Shell",
"bytes": "6252"
}
],
"symlink_target": ""
} |
from ornithology import *
#
# Setup a personal condor with just a schedd - no need for anything else
#
@standup
def condor(test_dir):
with Condor(
local_dir=test_dir / "condor",
config={
"DAEMON_LIST": "SCHEDD MASTER",
"USE_SHARED_PORT": False
}
) as condor:
yield condor
#
# Submit a job that will immediately go on hold due to job policy expressions.
# We use scheduler universe so it runs quickly; no need to wait for matches etc.
#
@action
def job(condor, path_to_sleep):
handle = condor.submit(
description={"executable": path_to_sleep,
"arguments": "0",
"universe": "scheduler",
"on_exit_hold": "true",
"log": "job_events.log"
},
count=1,
)
# Wait for job to go on hold the first time...
assert handle.wait(condition=ClusterState.any_held,timeout=60)
# ...and then release it....
handle.release()
# ...and now wait for the job to be released in the event log....
handle.wait(condition=ClusterState.none_held,timeout=60)
# and finally wait for job to be held a second time... this way we can confirm hold counters
# are incrementing as they should.
assert handle.wait(condition=ClusterState.any_held,timeout=60)
# Return the first (and only) job ad in the cluster for testing class to reference
return handle.query()[0]
#
# The tests.... assertions on hold aggregates in the job ad
#
class TestJobHoldAggregates:
# Methods that begin with test_* are tests.
def test_holdreasoncode(self, job):
assert job["HoldReasonCode"] == 3
def test_lastholdreasoncode(self, job):
assert job["LastHoldReasonCode"] == 3
def test_holdreasonsubcode(self, job):
assert job["HoldReasonSubCode"] == 0
def test_lastholdsubreasoncode(self, job):
assert job["LastHoldReasonSubCode"] == 0
def test_hold_numholds(self, job):
assert job["NumHolds"] == 2
def test_hold_numholdsbyreason_was_policy(self, job):
assert job["NumHoldsByReason"] == { 'JobPolicy' : 2 }
| {
"content_hash": "3312d9b62ac110a6b958c1322c434448",
"timestamp": "",
"source": "github",
"line_count": 68,
"max_line_length": 96,
"avg_line_length": 31.544117647058822,
"alnum_prop": 0.6335664335664336,
"repo_name": "htcondor/htcondor",
"id": "6917b9b3866064be35a509e161593286cf2fe8e9",
"size": "2409",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "src/condor_tests/test_num_holds.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Ada",
"bytes": "71055"
},
{
"name": "Awk",
"bytes": "9454"
},
{
"name": "Batchfile",
"bytes": "146264"
},
{
"name": "C",
"bytes": "1651049"
},
{
"name": "C++",
"bytes": "31790435"
},
{
"name": "CMake",
"bytes": "468527"
},
{
"name": "CSS",
"bytes": "9738"
},
{
"name": "Dockerfile",
"bytes": "75955"
},
{
"name": "Fortran",
"bytes": "1279"
},
{
"name": "HTML",
"bytes": "59724"
},
{
"name": "Java",
"bytes": "43977"
},
{
"name": "JavaScript",
"bytes": "130293"
},
{
"name": "M4",
"bytes": "20440"
},
{
"name": "Makefile",
"bytes": "68811"
},
{
"name": "Perl",
"bytes": "3761627"
},
{
"name": "PowerShell",
"bytes": "5412"
},
{
"name": "Python",
"bytes": "1593654"
},
{
"name": "Roff",
"bytes": "2353"
},
{
"name": "Shell",
"bytes": "579393"
},
{
"name": "VBScript",
"bytes": "8734"
},
{
"name": "Yacc",
"bytes": "13532"
}
],
"symlink_target": ""
} |
"""A simple Wi-Fi mobility manager."""
from empower_core.app import EVERY
from empower.managers.ranmanager.lvapp.wifiapp import EWiFiApp
class WiFiMobilityManager(EWiFiApp):
"""A simple Wi-Fi mobility manager.
    This app will periodically hand over every LVAP in the network to the
interface with the highest RSSI.
Parameters:
        service_id: the application id as a UUID (mandatory)
        project_id: the project id as a UUID (mandatory)
every: the loop period in ms (optional, default 2000ms)
Example:
POST /api/v1/projects/52313ecb-9d00-4b7d-b873-b55d3d9ada26/apps
{
"name": "empower.apps.wifimobilitymanager.wifimobilitymanager",
"params": {
"every": 2000
}
}
"""
def loop(self):
"""Periodic job."""
for lvap in self.lvaps.values():
lvap.blocks = self.blocks().sort_by_rssi(lvap.addr).first()
def launch(context, service_id, every=EVERY):
""" Initialize the module. """
return WiFiMobilityManager(context=context,
service_id=service_id,
every=every)
| {
"content_hash": "9aa61bb3eb664e58a4c13be39787a2d4",
"timestamp": "",
"source": "github",
"line_count": 41,
"max_line_length": 75,
"avg_line_length": 28.829268292682926,
"alnum_prop": 0.6125211505922166,
"repo_name": "5g-empower/empower-runtime",
"id": "9e67c0f2ea91104780342950e0da8ae0fc91b09a",
"size": "1791",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "empower/apps/wifimobilitymanager/wifimobilitymanager.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "158"
},
{
"name": "HTML",
"bytes": "77777"
},
{
"name": "JavaScript",
"bytes": "210914"
},
{
"name": "Python",
"bytes": "499223"
}
],
"symlink_target": ""
} |
__author__ = 'Tom Schaul, [email protected]'
from random import choice
from gomokuplayer import GomokuPlayer
class RandomGomokuPlayer(GomokuPlayer):
""" do random moves in Go-Moku"""
def getAction(self):
return [self.color, choice(self.game.getLegals(self.color))] | {
"content_hash": "7dfdbb6ceed0bae4369588ae1cd08367",
"timestamp": "",
"source": "github",
"line_count": 12,
"max_line_length": 68,
"avg_line_length": 23.333333333333332,
"alnum_prop": 0.7142857142857143,
"repo_name": "arnaudsj/pybrain",
"id": "cf8fc034012fcfbb56d657f6f9ce3dcf697563c7",
"size": "280",
"binary": false,
"copies": "6",
"ref": "refs/heads/master",
"path": "pybrain/rl/environments/twoplayergames/gomokuplayers/randomplayer.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [],
"symlink_target": ""
} |
from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_resource
from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_response
from nssrc.com.citrix.netscaler.nitro.service.options import options
from nssrc.com.citrix.netscaler.nitro.exception.nitro_exception import nitro_exception
from nssrc.com.citrix.netscaler.nitro.util.nitro_util import nitro_util
class appfwglobal_appfwpolicy_binding(base_resource) :
""" Binding class showing the appfwpolicy that can be bound to appfwglobal.
"""
def __init__(self) :
self._policyname = ""
self._priority = 0
self._gotopriorityexpression = ""
self._invoke = False
self._state = ""
self._labeltype = ""
self._labelname = ""
self._numpol = 0
self._flowtype = 0
self._type = ""
self._policytype = ""
self.___count = 0
@property
def priority(self) :
ur"""The priority of the policy.
"""
try :
return self._priority
except Exception as e:
raise e
@priority.setter
def priority(self, priority) :
ur"""The priority of the policy.
"""
try :
self._priority = priority
except Exception as e:
raise e
@property
def policyname(self) :
ur"""Name of the policy.
"""
try :
return self._policyname
except Exception as e:
raise e
@policyname.setter
def policyname(self, policyname) :
ur"""Name of the policy.
"""
try :
self._policyname = policyname
except Exception as e:
raise e
@property
def labelname(self) :
ur"""Name of the policy label to invoke if the current policy evaluates to TRUE, the invoke parameter is set, and Label Type is set to Policy Label.
"""
try :
return self._labelname
except Exception as e:
raise e
@labelname.setter
def labelname(self, labelname) :
ur"""Name of the policy label to invoke if the current policy evaluates to TRUE, the invoke parameter is set, and Label Type is set to Policy Label.
"""
try :
self._labelname = labelname
except Exception as e:
raise e
@property
def state(self) :
ur"""Enable or disable the binding to activate or deactivate the policy. This is applicable to classic policies only.<br/>Possible values = ENABLED, DISABLED.
"""
try :
return self._state
except Exception as e:
raise e
@state.setter
def state(self, state) :
ur"""Enable or disable the binding to activate or deactivate the policy. This is applicable to classic policies only.<br/>Possible values = ENABLED, DISABLED
"""
try :
self._state = state
except Exception as e:
raise e
@property
def gotopriorityexpression(self) :
ur"""Expression specifying the priority of the next policy which will get evaluated if the current policy rule evaluates to TRUE.
"""
try :
return self._gotopriorityexpression
except Exception as e:
raise e
@gotopriorityexpression.setter
def gotopriorityexpression(self, gotopriorityexpression) :
ur"""Expression specifying the priority of the next policy which will get evaluated if the current policy rule evaluates to TRUE.
"""
try :
self._gotopriorityexpression = gotopriorityexpression
except Exception as e:
raise e
@property
def type(self) :
ur"""Bind point to which to policy is bound.<br/>Possible values = REQ_OVERRIDE, REQ_DEFAULT, NONE.
"""
try :
return self._type
except Exception as e:
raise e
@type.setter
def type(self, type) :
ur"""Bind point to which to policy is bound.<br/>Possible values = REQ_OVERRIDE, REQ_DEFAULT, NONE
"""
try :
self._type = type
except Exception as e:
raise e
@property
def invoke(self) :
ur"""If the current policy evaluates to TRUE, terminate evaluation of policies bound to the current policy label, and then forward the request to the specified virtual server or evaluate the specified policy label.
"""
try :
return self._invoke
except Exception as e:
raise e
@invoke.setter
def invoke(self, invoke) :
ur"""If the current policy evaluates to TRUE, terminate evaluation of policies bound to the current policy label, and then forward the request to the specified virtual server or evaluate the specified policy label.
"""
try :
self._invoke = invoke
except Exception as e:
raise e
@property
def labeltype(self) :
ur"""Type of policy label invocation.<br/>Possible values = reqvserver, policylabel.
"""
try :
return self._labeltype
except Exception as e:
raise e
@labeltype.setter
def labeltype(self, labeltype) :
ur"""Type of policy label invocation.<br/>Possible values = reqvserver, policylabel
"""
try :
self._labeltype = labeltype
except Exception as e:
raise e
@property
def flowtype(self) :
ur"""flowtype of the bound application firewall policy.
"""
try :
return self._flowtype
except Exception as e:
raise e
@property
def numpol(self) :
ur"""The number of policies bound to the bindpoint.
"""
try :
return self._numpol
except Exception as e:
raise e
@property
def policytype(self) :
ur""".<br/>Possible values = Classic Policy, Advanced Policy.
"""
try :
return self._policytype
except Exception as e:
raise e
def _get_nitro_response(self, service, response) :
ur""" converts nitro response into object and returns the object array in case of get request.
"""
try :
result = service.payload_formatter.string_to_resource(appfwglobal_appfwpolicy_binding_response, response, self.__class__.__name__)
if(result.errorcode != 0) :
if (result.errorcode == 444) :
service.clear_session(self)
if result.severity :
if (result.severity == "ERROR") :
raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
else :
raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
return result.appfwglobal_appfwpolicy_binding
except Exception as e :
raise e
def _get_object_name(self) :
ur""" Returns the value of object identifier argument
"""
try :
return 0
except Exception as e :
raise e
@classmethod
def add(cls, client, resource) :
try :
if resource and type(resource) is not list :
updateresource = appfwglobal_appfwpolicy_binding()
updateresource.policyname = resource.policyname
updateresource.priority = resource.priority
updateresource.state = resource.state
updateresource.gotopriorityexpression = resource.gotopriorityexpression
updateresource.type = resource.type
updateresource.invoke = resource.invoke
updateresource.labeltype = resource.labeltype
updateresource.labelname = resource.labelname
return updateresource.update_resource(client)
else :
if resource and len(resource) > 0 :
updateresources = [appfwglobal_appfwpolicy_binding() for _ in range(len(resource))]
for i in range(len(resource)) :
updateresources[i].policyname = resource[i].policyname
updateresources[i].priority = resource[i].priority
updateresources[i].state = resource[i].state
updateresources[i].gotopriorityexpression = resource[i].gotopriorityexpression
updateresources[i].type = resource[i].type
updateresources[i].invoke = resource[i].invoke
updateresources[i].labeltype = resource[i].labeltype
updateresources[i].labelname = resource[i].labelname
return cls.update_bulk_request(client, updateresources)
except Exception as e :
raise e
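    # Usage sketch (address, credentials and policy name are placeholders, not
    # part of this module): bind an existing appfw policy globally at the
    # REQ_DEFAULT bind point with priority 100.
    #
    #     from nssrc.com.citrix.netscaler.nitro.service.nitro_service import nitro_service
    #     client = nitro_service("10.0.0.10", "http")
    #     client.login("nsroot", "nsroot")
    #     binding = appfwglobal_appfwpolicy_binding()
    #     binding.policyname = "my_appfw_pol"
    #     binding.priority = 100
    #     binding.type = "REQ_DEFAULT"
    #     appfwglobal_appfwpolicy_binding.add(client, binding)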
@classmethod
def delete(cls, client, resource) :
try :
if resource and type(resource) is not list :
deleteresource = appfwglobal_appfwpolicy_binding()
deleteresource.policyname = resource.policyname
deleteresource.type = resource.type
deleteresource.priority = resource.priority
return deleteresource.delete_resource(client)
else :
if resource and len(resource) > 0 :
deleteresources = [appfwglobal_appfwpolicy_binding() for _ in range(len(resource))]
for i in range(len(resource)) :
deleteresources[i].policyname = resource[i].policyname
deleteresources[i].type = resource[i].type
deleteresources[i].priority = resource[i].priority
return cls.delete_bulk_request(client, deleteresources)
except Exception as e :
raise e
@classmethod
def get(cls, service) :
ur""" Use this API to fetch a appfwglobal_appfwpolicy_binding resources.
"""
try :
obj = appfwglobal_appfwpolicy_binding()
response = obj.get_resources(service)
return response
except Exception as e:
raise e
@classmethod
def get_filtered(cls, service, filter_) :
ur""" Use this API to fetch filtered set of appfwglobal_appfwpolicy_binding resources.
Filter string should be in JSON format.eg: "port:80,servicetype:HTTP".
"""
try :
obj = appfwglobal_appfwpolicy_binding()
option_ = options()
option_.filter = filter_
response = obj.getfiltered(service, option_)
return response
except Exception as e:
raise e
@classmethod
def count(cls, service) :
ur""" Use this API to count appfwglobal_appfwpolicy_binding resources configued on NetScaler.
"""
try :
obj = appfwglobal_appfwpolicy_binding()
option_ = options()
option_.count = True
response = obj.get_resources(service, option_)
if response :
return response[0].__dict__['___count']
return 0
except Exception as e:
raise e
@classmethod
def count_filtered(cls, service, filter_) :
ur""" Use this API to count the filtered set of appfwglobal_appfwpolicy_binding resources.
Filter string should be in JSON format.eg: "port:80,servicetype:HTTP".
"""
try :
obj = appfwglobal_appfwpolicy_binding()
option_ = options()
option_.count = True
option_.filter = filter_
response = obj.getfiltered(service, option_)
if response :
return response[0].__dict__['___count']
return 0
except Exception as e:
raise e
class Type:
REQ_OVERRIDE = "REQ_OVERRIDE"
REQ_DEFAULT = "REQ_DEFAULT"
NONE = "NONE"
class Labeltype:
reqvserver = "reqvserver"
policylabel = "policylabel"
class Policytype:
Classic_Policy = "Classic Policy"
Advanced_Policy = "Advanced Policy"
class State:
ENABLED = "ENABLED"
DISABLED = "DISABLED"
class appfwglobal_appfwpolicy_binding_response(base_response) :
def __init__(self, length=1) :
self.appfwglobal_appfwpolicy_binding = []
self.errorcode = 0
self.message = ""
self.severity = ""
self.sessionid = ""
self.appfwglobal_appfwpolicy_binding = [appfwglobal_appfwpolicy_binding() for _ in range(length)]
| {
"content_hash": "3dcb1c2c6f40107f7d51bf5f30ae9b08",
"timestamp": "",
"source": "github",
"line_count": 355,
"max_line_length": 216,
"avg_line_length": 29.4,
"alnum_prop": 0.7101657564434224,
"repo_name": "atopuzov/nitro-python",
"id": "ee8a451fc60a2a51071422de13ac4f179f6635db",
"size": "11051",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "nssrc/com/citrix/netscaler/nitro/resource/config/appfw/appfwglobal_appfwpolicy_binding.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "10881939"
},
{
"name": "Shell",
"bytes": "513"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('climatemodels', '0016_auto_20160420_1411'),
]
operations = [
migrations.RemoveField(
model_name='outputdata',
name='scenario',
),
migrations.AddField(
model_name='outputdata',
name='scenarios',
field=models.ManyToManyField(to='climatemodels.Scenario'),
),
migrations.RemoveField(
model_name='inputdata',
name='scenario',
),
migrations.AddField(
model_name='inputdata',
name='scenario',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='climatemodels.Scenario'),
),
]
| {
"content_hash": "c3fe08546770c5985d94a79f62bc68b6",
"timestamp": "",
"source": "github",
"line_count": 32,
"max_line_length": 133,
"avg_line_length": 28.0625,
"alnum_prop": 0.589086859688196,
"repo_name": "bruecksen/isimip",
"id": "046d615f3b015c18cfbfbc7cdc3030f1f9b065fb",
"size": "970",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "isi_mip/climatemodels/migrations/0017_auto_20160420_1434.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "36731"
},
{
"name": "HTML",
"bytes": "106877"
},
{
"name": "JavaScript",
"bytes": "30564"
},
{
"name": "Python",
"bytes": "4244200"
},
{
"name": "Shell",
"bytes": "789"
}
],
"symlink_target": ""
} |
from __future__ import print_function
import atexit
import ctypes as ct
import fcntl
import json
import os
import re
import errno
import sys
import platform
from .libbcc import lib, bcc_symbol, bcc_symbol_option, bcc_stacktrace_build_id, _SYM_CB_TYPE
from .table import Table, PerfEventArray, RingBuf, BPF_MAP_TYPE_QUEUE, BPF_MAP_TYPE_STACK
from .perf import Perf
from .utils import get_online_cpus, printb, _assert_is_bytes, ArgString, StrcmpRewrite
from .version import __version__
from .disassembler import disassemble_prog, decode_map
from .usdt import USDT, USDTException
try:
basestring
except NameError: # Python 3
basestring = str
_default_probe_limit = 1000
_num_open_probes = 0
# for tests
def _get_num_open_probes():
global _num_open_probes
return _num_open_probes
TRACEFS = "/sys/kernel/debug/tracing"
# Debug flags
# Debug output compiled LLVM IR.
DEBUG_LLVM_IR = 0x1
# Debug output loaded BPF bytecode and register state on branches.
DEBUG_BPF = 0x2
# Debug output pre-processor result.
DEBUG_PREPROCESSOR = 0x4
# Debug output ASM instructions embedded with source.
DEBUG_SOURCE = 0x8
# Debug output register state on all instructions in addition to DEBUG_BPF.
DEBUG_BPF_REGISTER_STATE = 0x10
# Debug BTF.
DEBUG_BTF = 0x20
class SymbolCache(object):
def __init__(self, pid):
self.cache = lib.bcc_symcache_new(
pid, ct.cast(None, ct.POINTER(bcc_symbol_option)))
def resolve(self, addr, demangle):
"""
Return a tuple of the symbol (function), its offset from the beginning
of the function, and the module in which it lies. For example:
("start_thread", 0x202, "/usr/lib/.../libpthread-2.24.so")
If the symbol cannot be found but we know which module it is in,
return the module name and the offset from the beginning of the
module. If we don't even know the module, return the absolute
address as the offset.
"""
sym = bcc_symbol()
if demangle:
res = lib.bcc_symcache_resolve(self.cache, addr, ct.byref(sym))
else:
res = lib.bcc_symcache_resolve_no_demangle(self.cache, addr,
ct.byref(sym))
if res < 0:
if sym.module and sym.offset:
return (None, sym.offset,
ct.cast(sym.module, ct.c_char_p).value)
return (None, addr, None)
if demangle:
name_res = sym.demangle_name
lib.bcc_symbol_free_demangle_name(ct.byref(sym))
else:
name_res = sym.name
return (name_res, sym.offset, ct.cast(sym.module, ct.c_char_p).value)
def resolve_name(self, module, name):
module = _assert_is_bytes(module)
name = _assert_is_bytes(name)
addr = ct.c_ulonglong()
if lib.bcc_symcache_resolve_name(self.cache, module, name,
ct.byref(addr)) < 0:
return -1
return addr.value
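    # Hedged usage sketch: a cache built with pid -1 resolves kernel symbols,
    # while a positive pid resolves symbols of that process. The address below
    # is purely illustrative.
    #
    #     cache = SymbolCache(-1)
    #     name, offset, module = cache.resolve(0xffffffff81000000, False)
    #     addr = cache.resolve_name(None, b"do_sys_open")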
class PerfType:
# From perf_type_id in uapi/linux/perf_event.h
HARDWARE = 0
SOFTWARE = 1
TRACEPOINT = 2
HW_CACHE = 3
RAW = 4
BREAKPOINT = 5
class PerfHWConfig:
# From perf_hw_id in uapi/linux/perf_event.h
CPU_CYCLES = 0
INSTRUCTIONS = 1
CACHE_REFERENCES = 2
CACHE_MISSES = 3
BRANCH_INSTRUCTIONS = 4
BRANCH_MISSES = 5
BUS_CYCLES = 6
STALLED_CYCLES_FRONTEND = 7
STALLED_CYCLES_BACKEND = 8
REF_CPU_CYCLES = 9
class PerfSWConfig:
# From perf_sw_id in uapi/linux/perf_event.h
CPU_CLOCK = 0
TASK_CLOCK = 1
PAGE_FAULTS = 2
CONTEXT_SWITCHES = 3
CPU_MIGRATIONS = 4
PAGE_FAULTS_MIN = 5
PAGE_FAULTS_MAJ = 6
ALIGNMENT_FAULTS = 7
EMULATION_FAULTS = 8
DUMMY = 9
BPF_OUTPUT = 10
class PerfEventSampleFormat:
# from perf_event_sample_format in uapi/linux/bpf.h
IP = (1 << 0)
TID = (1 << 1)
TIME = (1 << 2)
ADDR = (1 << 3)
READ = (1 << 4)
CALLCHAIN = (1 << 5)
ID = (1 << 6)
CPU = (1 << 7)
PERIOD = (1 << 8)
STREAM_ID = (1 << 9)
RAW = (1 << 10)
BRANCH_STACK = (1 << 11)
REGS_USER = (1 << 12)
STACK_USER = (1 << 13)
WEIGHT = (1 << 14)
DATA_SRC = (1 << 15)
IDENTIFIER = (1 << 16)
TRANSACTION = (1 << 17)
REGS_INTR = (1 << 18)
PHYS_ADDR = (1 << 19)
AUX = (1 << 20)
CGROUP = (1 << 21)
DATA_PAGE_SIZE = (1 << 22)
CODE_PAGE_SIZE = (1 << 23)
WEIGHT_STRUCT = (1 << 24)
class BPFProgType:
# From bpf_prog_type in uapi/linux/bpf.h
SOCKET_FILTER = 1
KPROBE = 2
SCHED_CLS = 3
SCHED_ACT = 4
TRACEPOINT = 5
XDP = 6
PERF_EVENT = 7
CGROUP_SKB = 8
CGROUP_SOCK = 9
LWT_IN = 10
LWT_OUT = 11
LWT_XMIT = 12
SOCK_OPS = 13
SK_SKB = 14
CGROUP_DEVICE = 15
SK_MSG = 16
RAW_TRACEPOINT = 17
CGROUP_SOCK_ADDR = 18
TRACING = 26
LSM = 29
class BPFAttachType:
# from bpf_attach_type uapi/linux/bpf.h
CGROUP_INET_INGRESS = 0
CGROUP_INET_EGRESS = 1
CGROUP_INET_SOCK_CREATE = 2
CGROUP_SOCK_OPS = 3
SK_SKB_STREAM_PARSER = 4
SK_SKB_STREAM_VERDICT = 5
CGROUP_DEVICE = 6
SK_MSG_VERDICT = 7
CGROUP_INET4_BIND = 8
CGROUP_INET6_BIND = 9
CGROUP_INET4_CONNECT = 10
CGROUP_INET6_CONNECT = 11
CGROUP_INET4_POST_BIND = 12
CGROUP_INET6_POST_BIND = 13
CGROUP_UDP4_SENDMSG = 14
CGROUP_UDP6_SENDMSG = 15
LIRC_MODE2 = 16
FLOW_DISSECTOR = 17
CGROUP_SYSCTL = 18
CGROUP_UDP4_RECVMSG = 19
CGROUP_UDP6_RECVMSG = 20
CGROUP_GETSOCKOPT = 21
CGROUP_SETSOCKOPT = 22
TRACE_RAW_TP = 23
TRACE_FENTRY = 24
TRACE_FEXIT = 25
MODIFY_RETURN = 26
LSM_MAC = 27
TRACE_ITER = 28
CGROUP_INET4_GETPEERNAME = 29
CGROUP_INET6_GETPEERNAME = 30
CGROUP_INET4_GETSOCKNAME = 31
CGROUP_INET6_GETSOCKNAME = 32
XDP_DEVMAP = 33
CGROUP_INET_SOCK_RELEASE = 34
XDP_CPUMAP = 35
SK_LOOKUP = 36
XDP = 37
SK_SKB_VERDICT = 38
class XDPAction:
# from xdp_action uapi/linux/bpf.h
XDP_ABORTED = 0
XDP_DROP = 1
XDP_PASS = 2
XDP_TX = 3
XDP_REDIRECT = 4
class XDPFlags:
# from xdp_flags uapi/linux/if_link.h
# unlike similar enum-type holder classes in this file, source for these
# is #define XDP_FLAGS_UPDATE_IF_NOEXIST, #define XDP_FLAGS_SKB_MODE, ...
UPDATE_IF_NOEXIST = (1 << 0)
SKB_MODE = (1 << 1)
DRV_MODE = (1 << 2)
HW_MODE = (1 << 3)
REPLACE = (1 << 4)
class BPF(object):
# Here for backwards compatibility only, add new enum members and types
# the appropriate wrapper class elsewhere in this file to avoid namespace
# collision issues
SOCKET_FILTER = BPFProgType.SOCKET_FILTER
KPROBE = BPFProgType.KPROBE
SCHED_CLS = BPFProgType.SCHED_CLS
SCHED_ACT = BPFProgType.SCHED_ACT
TRACEPOINT = BPFProgType.TRACEPOINT
XDP = BPFProgType.XDP
PERF_EVENT = BPFProgType.PERF_EVENT
CGROUP_SKB = BPFProgType.CGROUP_SKB
CGROUP_SOCK = BPFProgType.CGROUP_SOCK
LWT_IN = BPFProgType.LWT_IN
LWT_OUT = BPFProgType.LWT_OUT
LWT_XMIT = BPFProgType.LWT_XMIT
SOCK_OPS = BPFProgType.SOCK_OPS
SK_SKB = BPFProgType.SK_SKB
CGROUP_DEVICE = BPFProgType.CGROUP_DEVICE
SK_MSG = BPFProgType.SK_MSG
RAW_TRACEPOINT = BPFProgType.RAW_TRACEPOINT
CGROUP_SOCK_ADDR = BPFProgType.CGROUP_SOCK_ADDR
TRACING = BPFProgType.TRACING
LSM = BPFProgType.LSM
XDP_ABORTED = XDPAction.XDP_ABORTED
XDP_DROP = XDPAction.XDP_DROP
XDP_PASS = XDPAction.XDP_PASS
XDP_TX = XDPAction.XDP_TX
XDP_REDIRECT = XDPAction.XDP_REDIRECT
XDP_FLAGS_UPDATE_IF_NOEXIST = XDPFlags.UPDATE_IF_NOEXIST
XDP_FLAGS_SKB_MODE = XDPFlags.SKB_MODE
XDP_FLAGS_DRV_MODE = XDPFlags.DRV_MODE
XDP_FLAGS_HW_MODE = XDPFlags.HW_MODE
XDP_FLAGS_REPLACE = XDPFlags.REPLACE
# END enum backwards compat
_probe_repl = re.compile(b"[^a-zA-Z0-9_]")
_sym_caches = {}
_bsymcache = lib.bcc_buildsymcache_new()
_auto_includes = {
"linux/time.h": ["time"],
"linux/fs.h": ["fs", "file"],
"linux/blkdev.h": ["bio", "request"],
"linux/slab.h": ["alloc"],
"linux/netdevice.h": ["sk_buff", "net_device"]
}
_syscall_prefixes = [
b"sys_",
b"__x64_sys_",
b"__x32_compat_sys_",
b"__ia32_compat_sys_",
b"__arm64_sys_",
b"__s390x_sys_",
b"__s390_sys_",
]
# BPF timestamps come from the monotonic clock. To be able to filter
# and compare them from Python, we need to invoke clock_gettime.
# Adapted from http://stackoverflow.com/a/1205762
CLOCK_MONOTONIC = 1 # see <linux/time.h>
class timespec(ct.Structure):
_fields_ = [('tv_sec', ct.c_long), ('tv_nsec', ct.c_long)]
_librt = ct.CDLL('librt.so.1', use_errno=True)
_clock_gettime = _librt.clock_gettime
_clock_gettime.argtypes = [ct.c_int, ct.POINTER(timespec)]
@classmethod
def monotonic_time(cls):
"""monotonic_time()
Returns the system monotonic time from clock_gettime, using the
CLOCK_MONOTONIC constant. The time returned is in nanoseconds.
"""
t = cls.timespec()
if cls._clock_gettime(cls.CLOCK_MONOTONIC, ct.byref(t)) != 0:
errno = ct.get_errno()
raise OSError(errno, os.strerror(errno))
return t.tv_sec * 1e9 + t.tv_nsec
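    # Hedged sketch: timestamps taken in BPF programs with bpf_ktime_get_ns()
    # use the same monotonic clock, so they can be compared directly
    # ('event.ts' below is a hypothetical field of a perf buffer event).
    #
    #     age_ns = BPF.monotonic_time() - event.ts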
@classmethod
def generate_auto_includes(cls, program_words):
"""
Generates #include statements automatically based on a set of
recognized types such as sk_buff and bio. The input is all the words
that appear in the BPF program, and the output is a (possibly empty)
string of #include statements, such as "#include <linux/fs.h>".
"""
headers = ""
for header, keywords in cls._auto_includes.items():
for keyword in keywords:
for word in program_words:
if keyword in word and header not in headers:
headers += "#include <%s>\n" % header
return headers
# defined for compatibility reasons, to be removed
Table = Table
class Function(object):
def __init__(self, bpf, name, fd):
self.bpf = bpf
self.name = name
self.fd = fd
@staticmethod
def _find_file(filename):
""" If filename is invalid, search in ./ of argv[0] """
if filename:
if not os.path.isfile(filename):
argv0 = ArgString(sys.argv[0])
t = b"/".join([os.path.abspath(os.path.dirname(argv0.__bytes__())), filename])
if os.path.isfile(t):
filename = t
else:
raise Exception("Could not find file %s" % filename)
return filename
@staticmethod
def find_exe(bin_path):
"""
find_exe(bin_path)
Traverses the PATH environment variable, looking for the first
directory that contains an executable file named bin_path, and
returns the full path to that file, or None if no such file
can be found. This is meant to replace invocations of the
"which" shell utility, which doesn't have portable semantics
for skipping aliases.
"""
# Source: http://stackoverflow.com/a/377028
def is_exe(fpath):
return os.path.isfile(fpath) and \
os.access(fpath, os.X_OK)
fpath, fname = os.path.split(bin_path)
if fpath:
if is_exe(bin_path):
return bin_path
else:
for path in os.environ["PATH"].split(os.pathsep):
path = path.strip('"')
exe_file = os.path.join(path.encode(), bin_path)
if is_exe(exe_file):
return exe_file
return None
def __init__(self, src_file=b"", hdr_file=b"", text=None, debug=0,
cflags=[], usdt_contexts=[], allow_rlimit=True, device=None,
attach_usdt_ignore_pid=False):
"""Create a new BPF module with the given source code.
Note:
All fields are marked as optional, but either `src_file` or `text`
must be supplied, and not both.
Args:
src_file (Optional[str]): Path to a source file for the module
hdr_file (Optional[str]): Path to a helper header file for the `src_file`
text (Optional[str]): Contents of a source file for the module
debug (Optional[int]): Flags used for debug prints, can be |'d together
See "Debug flags" for explanation
"""
src_file = _assert_is_bytes(src_file)
hdr_file = _assert_is_bytes(hdr_file)
text = _assert_is_bytes(text)
assert not (text and src_file)
self.kprobe_fds = {}
self.uprobe_fds = {}
self.tracepoint_fds = {}
self.raw_tracepoint_fds = {}
self.kfunc_entry_fds = {}
self.kfunc_exit_fds = {}
self.lsm_fds = {}
self.perf_buffers = {}
self.open_perf_events = {}
self._ringbuf_manager = None
self.tracefile = None
atexit.register(self.cleanup)
self.debug = debug
self.funcs = {}
self.tables = {}
self.module = None
cflags_array = (ct.c_char_p * len(cflags))()
for i, s in enumerate(cflags): cflags_array[i] = bytes(ArgString(s))
if src_file:
src_file = BPF._find_file(src_file)
hdr_file = BPF._find_file(hdr_file)
if src_file:
                # Read the BPF C source file into the text variable. This
                # ensures that files and inline text are treated equally.
with open(src_file, mode="rb") as file:
text = file.read()
ctx_array = (ct.c_void_p * len(usdt_contexts))()
for i, usdt in enumerate(usdt_contexts):
ctx_array[i] = ct.c_void_p(usdt.get_context())
usdt_text = lib.bcc_usdt_genargs(ctx_array, len(usdt_contexts))
if usdt_text is None:
raise Exception("can't generate USDT probe arguments; " +
"possible cause is missing pid when a " +
"probe in a shared object has multiple " +
"locations")
text = usdt_text + text
self.module = lib.bpf_module_create_c_from_string(text,
self.debug,
cflags_array, len(cflags_array),
allow_rlimit, device)
if not self.module:
raise Exception("Failed to compile BPF module %s" % (src_file or "<text>"))
for usdt_context in usdt_contexts:
usdt_context.attach_uprobes(self, attach_usdt_ignore_pid)
# If any "kprobe__" or "tracepoint__" or "raw_tracepoint__"
# prefixed functions were defined,
# they will be loaded and attached here.
self._trace_autoload()
def load_funcs(self, prog_type=KPROBE):
"""load_funcs(prog_type=KPROBE)
Load all functions in this BPF module with the given type.
Returns a list of the function handles."""
fns = []
for i in range(0, lib.bpf_num_functions(self.module)):
func_name = lib.bpf_function_name(self.module, i)
fns.append(self.load_func(func_name, prog_type))
return fns
def load_func(self, func_name, prog_type, device = None):
func_name = _assert_is_bytes(func_name)
if func_name in self.funcs:
return self.funcs[func_name]
if not lib.bpf_function_start(self.module, func_name):
raise Exception("Unknown program %s" % func_name)
log_level = 0
if (self.debug & DEBUG_BPF_REGISTER_STATE):
log_level = 2
elif (self.debug & DEBUG_BPF):
log_level = 1
fd = lib.bcc_func_load(self.module, prog_type, func_name,
lib.bpf_function_start(self.module, func_name),
lib.bpf_function_size(self.module, func_name),
lib.bpf_module_license(self.module),
lib.bpf_module_kern_version(self.module),
log_level, None, 0, device)
if fd < 0:
atexit.register(self.donothing)
if ct.get_errno() == errno.EPERM:
raise Exception("Need super-user privileges to run")
errstr = os.strerror(ct.get_errno())
raise Exception("Failed to load BPF program %s: %s" %
(func_name, errstr))
fn = BPF.Function(self, func_name, fd)
self.funcs[func_name] = fn
return fn
def dump_func(self, func_name):
"""
Return the eBPF bytecodes for the specified function as a string
"""
func_name = _assert_is_bytes(func_name)
if not lib.bpf_function_start(self.module, func_name):
raise Exception("Unknown program %s" % func_name)
        start = lib.bpf_function_start(self.module, func_name)
        size = lib.bpf_function_size(self.module, func_name)
return ct.string_at(start, size)
def disassemble_func(self, func_name):
bpfstr = self.dump_func(func_name)
return disassemble_prog(func_name, bpfstr)
def decode_table(self, table_name, sizeinfo=False):
table_obj = self[table_name]
table_type = lib.bpf_table_type_id(self.module, table_obj.map_id)
return decode_map(table_name, table_obj, table_type, sizeinfo=sizeinfo)
str2ctype = {
u"_Bool": ct.c_bool,
u"char": ct.c_char,
u"wchar_t": ct.c_wchar,
u"unsigned char": ct.c_ubyte,
u"short": ct.c_short,
u"unsigned short": ct.c_ushort,
u"int": ct.c_int,
u"unsigned int": ct.c_uint,
u"long": ct.c_long,
u"unsigned long": ct.c_ulong,
u"long long": ct.c_longlong,
u"unsigned long long": ct.c_ulonglong,
u"float": ct.c_float,
u"double": ct.c_double,
u"long double": ct.c_longdouble,
u"__int128": ct.c_int64 * 2,
u"unsigned __int128": ct.c_uint64 * 2,
}
@staticmethod
def _decode_table_type(desc):
if isinstance(desc, basestring):
return BPF.str2ctype[desc]
anon = []
fields = []
for t in desc[1]:
if len(t) == 2:
fields.append((t[0], BPF._decode_table_type(t[1])))
elif len(t) == 3:
if isinstance(t[2], list):
fields.append((t[0], BPF._decode_table_type(t[1]) * t[2][0]))
elif isinstance(t[2], int):
fields.append((t[0], BPF._decode_table_type(t[1]), t[2]))
elif isinstance(t[2], basestring) and (
t[2] == u"union" or t[2] == u"struct" or
t[2] == u"struct_packed"):
name = t[0]
if name == "":
name = "__anon%d" % len(anon)
anon.append(name)
fields.append((name, BPF._decode_table_type(t)))
else:
raise Exception("Failed to decode type %s" % str(t))
else:
raise Exception("Failed to decode type %s" % str(t))
base = ct.Structure
is_packed = False
if len(desc) > 2:
if desc[2] == u"union":
base = ct.Union
elif desc[2] == u"struct":
base = ct.Structure
elif desc[2] == u"struct_packed":
base = ct.Structure
is_packed = True
if is_packed:
cls = type(str(desc[0]), (base,), dict(_anonymous_=anon, _pack_=1,
_fields_=fields))
else:
cls = type(str(desc[0]), (base,), dict(_anonymous_=anon,
_fields_=fields))
return cls
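    # Illustrative sketch (not part of the original module): a hypothetical
    # JSON-derived descriptor and the ctypes type it would decode to. The
    # descriptor layout mirrors the cases handled above; the field names and
    # sizes are invented for the example.
    #
    #   desc = [u"data_t",
    #           [[u"pid", u"unsigned int"],
    #            [u"comm", u"char", [16]]],
    #           u"struct"]
    #   data_t = BPF._decode_table_type(desc)
    #   # data_t is a ctypes.Structure with fields:
    #   #   pid  -> ctypes.c_uint
    #   #   comm -> ctypes.c_char * 16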
def get_table(self, name, keytype=None, leaftype=None, reducer=None):
name = _assert_is_bytes(name)
map_id = lib.bpf_table_id(self.module, name)
map_fd = lib.bpf_table_fd(self.module, name)
is_queuestack = lib.bpf_table_type_id(self.module, map_id) in [BPF_MAP_TYPE_QUEUE, BPF_MAP_TYPE_STACK]
if map_fd < 0:
raise KeyError
if not keytype and not is_queuestack:
key_desc = lib.bpf_table_key_desc(self.module, name).decode("utf-8")
if not key_desc:
raise Exception("Failed to load BPF Table %s key desc" % name)
keytype = BPF._decode_table_type(json.loads(key_desc))
if not leaftype:
leaf_desc = lib.bpf_table_leaf_desc(self.module, name).decode("utf-8")
if not leaf_desc:
raise Exception("Failed to load BPF Table %s leaf desc" % name)
leaftype = BPF._decode_table_type(json.loads(leaf_desc))
return Table(self, map_id, map_fd, keytype, leaftype, name, reducer=reducer)
def __getitem__(self, key):
if key not in self.tables:
self.tables[key] = self.get_table(key)
return self.tables[key]
def __setitem__(self, key, leaf):
self.tables[key] = leaf
def __len__(self):
return len(self.tables)
def __delitem__(self, key):
del self.tables[key]
def __iter__(self):
return self.tables.__iter__()
@staticmethod
def attach_func(fn, attachable_fd, attach_type, flags=0):
if not isinstance(fn, BPF.Function):
raise Exception("arg 1 must be of type BPF.Function")
res = lib.bpf_prog_attach(fn.fd, attachable_fd, attach_type, flags)
if res < 0:
raise Exception("Failed to attach BPF function with attach_type "\
"{0}: {1}".format(attach_type, os.strerror(-res)))
@staticmethod
def detach_func(fn, attachable_fd, attach_type):
if not isinstance(fn, BPF.Function):
raise Exception("arg 1 must be of type BPF.Function")
res = lib.bpf_prog_detach2(fn.fd, attachable_fd, attach_type)
if res < 0:
raise Exception("Failed to detach BPF function with attach_type "\
"{0}: {1}".format(attach_type, os.strerror(-res)))
@staticmethod
def attach_raw_socket(fn, dev):
dev = _assert_is_bytes(dev)
if not isinstance(fn, BPF.Function):
raise Exception("arg 1 must be of type BPF.Function")
sock = lib.bpf_open_raw_sock(dev)
if sock < 0:
errstr = os.strerror(ct.get_errno())
raise Exception("Failed to open raw device %s: %s" % (dev, errstr))
res = lib.bpf_attach_socket(sock, fn.fd)
if res < 0:
errstr = os.strerror(ct.get_errno())
raise Exception("Failed to attach BPF to device %s: %s"
% (dev, errstr))
fn.sock = sock
@staticmethod
def get_kprobe_functions(event_re):
blacklist_file = "%s/../kprobes/blacklist" % TRACEFS
try:
with open(blacklist_file, "rb") as blacklist_f:
blacklist = set([line.rstrip().split()[1] for line in blacklist_f])
except IOError as e:
if e.errno != errno.EPERM:
raise e
blacklist = set([])
fns = []
in_init_section = 0
in_irq_section = 0
with open("/proc/kallsyms", "rb") as avail_file:
for line in avail_file:
(t, fn) = line.rstrip().split()[1:3]
# Skip all functions defined between __init_begin and
# __init_end
if in_init_section == 0:
if fn == b'__init_begin':
in_init_section = 1
continue
elif in_init_section == 1:
if fn == b'__init_end':
in_init_section = 2
continue
# Skip all functions defined between __irqentry_text_start and
# __irqentry_text_end
if in_irq_section == 0:
if fn == b'__irqentry_text_start':
in_irq_section = 1
continue
                    # __irqentry_text_end does not always come after
                    # __irqentry_text_start; this only happens when there are
                    # no functions between the two irqentry_text markers.
elif fn == b'__irqentry_text_end':
in_irq_section = 2
continue
elif in_irq_section == 1:
if fn == b'__irqentry_text_end':
in_irq_section = 2
continue
                # All functions defined via NOKPROBE_SYMBOL() start with the
                # prefix _kbl_addr_*; blacklisting them by name also catches
                # such symbols when they are defined in kernel modules.
if fn.startswith(b'_kbl_addr_'):
continue
                # Explicitly blacklist perf-related functions; they are all
                # non-attachable.
elif fn.startswith(b'__perf') or fn.startswith(b'perf_'):
continue
                # Exclude all static functions with the prefix __SCT__; they
                # are all non-attachable.
elif fn.startswith(b'__SCT__'):
continue
# Exclude all gcc 8's extra .cold functions
                elif re.match(br'^.*\.cold(\.\d+)?$', fn):
continue
if (t.lower() in [b't', b'w']) and re.match(event_re, fn) \
and fn not in blacklist:
fns.append(fn)
return set(fns) # Some functions may appear more than once
def _check_probe_quota(self, num_new_probes):
global _num_open_probes
if _num_open_probes + num_new_probes > BPF.get_probe_limit():
raise Exception("Number of open probes would exceed global quota")
@staticmethod
def get_probe_limit():
env_probe_limit = os.environ.get('BCC_PROBE_LIMIT')
if env_probe_limit and env_probe_limit.isdigit():
return int(env_probe_limit)
else:
return _default_probe_limit
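    # Usage note (illustrative, not from the original source): the default
    # probe quota can be raised for a single run via the environment, e.g.
    #
    #   BCC_PROBE_LIMIT=2000 python my_tool.py
    #
    # where my_tool.py is a hypothetical script built on this module.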
def _add_kprobe_fd(self, ev_name, fn_name, fd):
global _num_open_probes
if ev_name not in self.kprobe_fds:
self.kprobe_fds[ev_name] = {}
self.kprobe_fds[ev_name][fn_name] = fd
_num_open_probes += 1
def _del_kprobe_fd(self, ev_name, fn_name):
global _num_open_probes
del self.kprobe_fds[ev_name][fn_name]
_num_open_probes -= 1
def _add_uprobe_fd(self, name, fd):
global _num_open_probes
self.uprobe_fds[name] = fd
_num_open_probes += 1
def _del_uprobe_fd(self, name):
global _num_open_probes
del self.uprobe_fds[name]
_num_open_probes -= 1
    # Find the current system's syscall prefix by testing it against the BPF
    # syscall. If no valid value is found, return the first candidate, which
    # will probably lead to errors in later API calls.
def get_syscall_prefix(self):
for prefix in self._syscall_prefixes:
if self.ksymname(b"%sbpf" % prefix) != -1:
return prefix
return self._syscall_prefixes[0]
    # Given a syscall's name, return the full kernel function name with the
    # current system's syscall prefix. For example, given "clone", the helper
    # would return "sys_clone" or "__x64_sys_clone".
def get_syscall_fnname(self, name):
name = _assert_is_bytes(name)
return self.get_syscall_prefix() + name
    # Given a kernel function name that represents a syscall but already has a
    # prefix included, transform it to use the current system's prefix. For
    # example, if "sys_clone" is provided, the helper may translate it to
    # "__x64_sys_clone".
def fix_syscall_fnname(self, name):
name = _assert_is_bytes(name)
for prefix in self._syscall_prefixes:
if name.startswith(prefix):
return self.get_syscall_fnname(name[len(prefix):])
return name
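    # Illustrative sketch (not part of the original module) of the helpers
    # above; the exact return values depend on the running kernel, and "b" is
    # assumed to be an existing BPF object:
    #
    #   b.get_syscall_prefix()              # e.g. b"__x64_sys_"
    #   b.get_syscall_fnname(b"clone")      # e.g. b"__x64_sys_clone"
    #   b.fix_syscall_fnname(b"sys_clone")  # e.g. b"__x64_sys_clone"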
def attach_kprobe(self, event=b"", event_off=0, fn_name=b"", event_re=b""):
event = _assert_is_bytes(event)
fn_name = _assert_is_bytes(fn_name)
event_re = _assert_is_bytes(event_re)
# allow the caller to glob multiple functions together
if event_re:
matches = BPF.get_kprobe_functions(event_re)
self._check_probe_quota(len(matches))
failed = 0
probes = []
for line in matches:
try:
self.attach_kprobe(event=line, fn_name=fn_name)
except:
failed += 1
probes.append(line)
if failed == len(matches):
raise Exception("Failed to attach BPF program %s to kprobe %s" %
(fn_name, '/'.join(probes)))
return
self._check_probe_quota(1)
fn = self.load_func(fn_name, BPF.KPROBE)
ev_name = b"p_" + event.replace(b"+", b"_").replace(b".", b"_")
fd = lib.bpf_attach_kprobe(fn.fd, 0, ev_name, event, event_off, 0)
if fd < 0:
raise Exception("Failed to attach BPF program %s to kprobe %s" %
(fn_name, event))
self._add_kprobe_fd(ev_name, fn_name, fd)
return self
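    # Illustrative usage sketch (not from the original source): attach a
    # single kprobe by name, or glob several functions with event_re. The
    # program name "trace_entry" is a placeholder for a function defined in
    # the loaded BPF C text.
    #
    #   b.attach_kprobe(event=b"vfs_read", fn_name=b"trace_entry")
    #   b.attach_kprobe(event_re=b"^vfs_.*", fn_name=b"trace_entry")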
def attach_kretprobe(self, event=b"", fn_name=b"", event_re=b"", maxactive=0):
event = _assert_is_bytes(event)
fn_name = _assert_is_bytes(fn_name)
event_re = _assert_is_bytes(event_re)
# allow the caller to glob multiple functions together
if event_re:
matches = BPF.get_kprobe_functions(event_re)
failed = 0
probes = []
for line in matches:
try:
self.attach_kretprobe(event=line, fn_name=fn_name,
maxactive=maxactive)
except:
failed += 1
probes.append(line)
if failed == len(matches):
raise Exception("Failed to attach BPF program %s to kretprobe %s" %
(fn_name, '/'.join(probes)))
return
self._check_probe_quota(1)
fn = self.load_func(fn_name, BPF.KPROBE)
ev_name = b"r_" + event.replace(b"+", b"_").replace(b".", b"_")
fd = lib.bpf_attach_kprobe(fn.fd, 1, ev_name, event, 0, maxactive)
if fd < 0:
raise Exception("Failed to attach BPF program %s to kretprobe %s" %
(fn_name, event))
self._add_kprobe_fd(ev_name, fn_name, fd)
return self
def detach_kprobe_event(self, ev_name):
ev_name = _assert_is_bytes(ev_name)
fn_names = list(self.kprobe_fds[ev_name].keys())
for fn_name in fn_names:
self.detach_kprobe_event_by_fn(ev_name, fn_name)
def detach_kprobe_event_by_fn(self, ev_name, fn_name):
ev_name = _assert_is_bytes(ev_name)
fn_name = _assert_is_bytes(fn_name)
if ev_name not in self.kprobe_fds:
raise Exception("Kprobe %s is not attached" % ev_name)
res = lib.bpf_close_perf_event_fd(self.kprobe_fds[ev_name][fn_name])
if res < 0:
raise Exception("Failed to close kprobe FD")
self._del_kprobe_fd(ev_name, fn_name)
if len(self.kprobe_fds[ev_name]) == 0:
res = lib.bpf_detach_kprobe(ev_name)
if res < 0:
raise Exception("Failed to detach BPF from kprobe")
def detach_kprobe(self, event, fn_name=None):
event = _assert_is_bytes(event)
ev_name = b"p_" + event.replace(b"+", b"_").replace(b".", b"_")
if fn_name:
fn_name = _assert_is_bytes(fn_name)
self.detach_kprobe_event_by_fn(ev_name, fn_name)
else:
self.detach_kprobe_event(ev_name)
def detach_kretprobe(self, event, fn_name=None):
event = _assert_is_bytes(event)
ev_name = b"r_" + event.replace(b"+", b"_").replace(b".", b"_")
if fn_name:
fn_name = _assert_is_bytes(fn_name)
self.detach_kprobe_event_by_fn(ev_name, fn_name)
else:
self.detach_kprobe_event(ev_name)
@staticmethod
def attach_xdp(dev, fn, flags=0):
'''
This function attaches a BPF function to a device on the device
driver level (XDP)
'''
dev = _assert_is_bytes(dev)
if not isinstance(fn, BPF.Function):
raise Exception("arg 1 must be of type BPF.Function")
res = lib.bpf_attach_xdp(dev, fn.fd, flags)
if res < 0:
err_no = ct.get_errno()
if err_no == errno.EBADMSG:
raise Exception("Internal error while attaching BPF to device,"+
" try increasing the debug level!")
else:
errstr = os.strerror(err_no)
raise Exception("Failed to attach BPF to device %s: %s"
% (dev, errstr))
@staticmethod
def remove_xdp(dev, flags=0):
'''
This function removes any BPF function from a device on the
device driver level (XDP)
'''
dev = _assert_is_bytes(dev)
res = lib.bpf_attach_xdp(dev, -1, flags)
if res < 0:
errstr = os.strerror(ct.get_errno())
raise Exception("Failed to detach BPF from device %s: %s"
% (dev, errstr))
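    # Illustrative usage sketch (not from the original source): load an XDP
    # program, attach it to a device, and detach it on shutdown. The function
    # name "xdp_prog" and the device "eth0" are placeholders.
    #
    #   fn = b.load_func(b"xdp_prog", BPF.XDP)
    #   BPF.attach_xdp(b"eth0", fn, flags=BPF.XDP_FLAGS_SKB_MODE)
    #   ...
    #   BPF.remove_xdp(b"eth0", flags=BPF.XDP_FLAGS_SKB_MODE)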
@classmethod
def _check_path_symbol(cls, module, symname, addr, pid, sym_off=0):
module = _assert_is_bytes(module)
symname = _assert_is_bytes(symname)
sym = bcc_symbol()
c_pid = 0 if pid == -1 else pid
if lib.bcc_resolve_symname(
module, symname,
addr or 0x0, c_pid,
ct.cast(None, ct.POINTER(bcc_symbol_option)),
ct.byref(sym),
) < 0:
raise Exception("could not determine address of symbol %s" % symname)
new_addr = sym.offset + sym_off
module_path = ct.cast(sym.module, ct.c_char_p).value
lib.bcc_procutils_free(sym.module)
return module_path, new_addr
@staticmethod
def find_library(libname):
libname = _assert_is_bytes(libname)
res = lib.bcc_procutils_which_so(libname, 0)
if not res:
return None
libpath = ct.cast(res, ct.c_char_p).value
lib.bcc_procutils_free(res)
return libpath
@staticmethod
def get_tracepoints(tp_re):
results = []
events_dir = os.path.join(TRACEFS, "events")
for category in os.listdir(events_dir):
cat_dir = os.path.join(events_dir, category)
if not os.path.isdir(cat_dir):
continue
for event in os.listdir(cat_dir):
evt_dir = os.path.join(cat_dir, event)
if os.path.isdir(evt_dir):
tp = ("%s:%s" % (category, event))
if re.match(tp_re.decode(), tp):
results.append(tp)
return results
@staticmethod
def tracepoint_exists(category, event):
evt_dir = os.path.join(TRACEFS, "events", category, event)
return os.path.isdir(evt_dir)
def attach_tracepoint(self, tp=b"", tp_re=b"", fn_name=b""):
"""attach_tracepoint(tp="", tp_re="", fn_name="")
Run the bpf function denoted by fn_name every time the kernel tracepoint
specified by 'tp' is hit. The optional parameters pid, cpu, and group_fd
can be used to filter the probe. The tracepoint specification is simply
the tracepoint category and the tracepoint name, separated by a colon.
For example: sched:sched_switch, syscalls:sys_enter_bind, etc.
Instead of a tracepoint name, a regular expression can be provided in
tp_re. The program will then attach to tracepoints that match the
provided regular expression.
To obtain a list of kernel tracepoints, use the tplist tool or cat the
file /sys/kernel/debug/tracing/available_events.
Examples:
BPF(text).attach_tracepoint(tp="sched:sched_switch", fn_name="on_switch")
BPF(text).attach_tracepoint(tp_re="sched:.*", fn_name="on_switch")
"""
tp = _assert_is_bytes(tp)
tp_re = _assert_is_bytes(tp_re)
fn_name = _assert_is_bytes(fn_name)
if tp_re:
for tp in BPF.get_tracepoints(tp_re):
self.attach_tracepoint(tp=tp, fn_name=fn_name)
return
fn = self.load_func(fn_name, BPF.TRACEPOINT)
(tp_category, tp_name) = tp.split(b':')
fd = lib.bpf_attach_tracepoint(fn.fd, tp_category, tp_name)
if fd < 0:
raise Exception("Failed to attach BPF program %s to tracepoint %s" %
(fn_name, tp))
self.tracepoint_fds[tp] = fd
return self
def attach_raw_tracepoint(self, tp=b"", fn_name=b""):
"""attach_raw_tracepoint(self, tp=b"", fn_name=b"")
Run the bpf function denoted by fn_name every time the kernel tracepoint
specified by 'tp' is hit. The bpf function should be loaded as a
RAW_TRACEPOINT type. The fn_name is the kernel tracepoint name,
e.g., sched_switch, sys_enter_bind, etc.
Examples:
BPF(text).attach_raw_tracepoint(tp="sched_switch", fn_name="on_switch")
"""
tp = _assert_is_bytes(tp)
if tp in self.raw_tracepoint_fds:
raise Exception("Raw tracepoint %s has been attached" % tp)
fn_name = _assert_is_bytes(fn_name)
fn = self.load_func(fn_name, BPF.RAW_TRACEPOINT)
fd = lib.bpf_attach_raw_tracepoint(fn.fd, tp)
if fd < 0:
raise Exception("Failed to attach BPF to raw tracepoint")
self.raw_tracepoint_fds[tp] = fd
return self
def detach_raw_tracepoint(self, tp=b""):
"""detach_raw_tracepoint(tp="")
Stop running the bpf function that is attached to the kernel tracepoint
specified by 'tp'.
Example: bpf.detach_raw_tracepoint("sched_switch")
"""
tp = _assert_is_bytes(tp)
if tp not in self.raw_tracepoint_fds:
raise Exception("Raw tracepoint %s is not attached" % tp)
os.close(self.raw_tracepoint_fds[tp])
del self.raw_tracepoint_fds[tp]
@staticmethod
def add_prefix(prefix, name):
if not name.startswith(prefix):
name = prefix + name
return name
@staticmethod
def support_kfunc():
        # BPF trampolines are currently supported only on the x86_64 architecture
if platform.machine() != 'x86_64':
return False
if not lib.bpf_has_kernel_btf():
return False
# kernel symbol "bpf_trampoline_link_prog" indicates kfunc support
if BPF.ksymname("bpf_trampoline_link_prog") != -1:
return True
return False
@staticmethod
def support_lsm():
if not lib.bpf_has_kernel_btf():
return False
# kernel symbol "bpf_lsm_bpf" indicates BPF LSM support
if BPF.ksymname(b"bpf_lsm_bpf") != -1:
return True
return False
def detach_kfunc(self, fn_name=b""):
fn_name = _assert_is_bytes(fn_name)
fn_name = BPF.add_prefix(b"kfunc__", fn_name)
if fn_name not in self.kfunc_entry_fds:
raise Exception("Kernel entry func %s is not attached" % fn_name)
os.close(self.kfunc_entry_fds[fn_name])
del self.kfunc_entry_fds[fn_name]
def detach_kretfunc(self, fn_name=b""):
fn_name = _assert_is_bytes(fn_name)
fn_name = BPF.add_prefix(b"kretfunc__", fn_name)
if fn_name not in self.kfunc_exit_fds:
raise Exception("Kernel exit func %s is not attached" % fn_name)
os.close(self.kfunc_exit_fds[fn_name])
del self.kfunc_exit_fds[fn_name]
def attach_kfunc(self, fn_name=b""):
fn_name = _assert_is_bytes(fn_name)
fn_name = BPF.add_prefix(b"kfunc__", fn_name)
if fn_name in self.kfunc_entry_fds:
raise Exception("Kernel entry func %s has been attached" % fn_name)
fn = self.load_func(fn_name, BPF.TRACING)
fd = lib.bpf_attach_kfunc(fn.fd)
if fd < 0:
raise Exception("Failed to attach BPF to entry kernel func")
self.kfunc_entry_fds[fn_name] = fd
return self
def attach_kretfunc(self, fn_name=b""):
fn_name = _assert_is_bytes(fn_name)
fn_name = BPF.add_prefix(b"kretfunc__", fn_name)
if fn_name in self.kfunc_exit_fds:
raise Exception("Kernel exit func %s has been attached" % fn_name)
fn = self.load_func(fn_name, BPF.TRACING)
fd = lib.bpf_attach_kfunc(fn.fd)
if fd < 0:
raise Exception("Failed to attach BPF to exit kernel func")
self.kfunc_exit_fds[fn_name] = fd
return self
def detach_lsm(self, fn_name=b""):
fn_name = _assert_is_bytes(fn_name)
fn_name = BPF.add_prefix(b"lsm__", fn_name)
if fn_name not in self.lsm_fds:
raise Exception("LSM %s is not attached" % fn_name)
os.close(self.lsm_fds[fn_name])
del self.lsm_fds[fn_name]
def attach_lsm(self, fn_name=b""):
fn_name = _assert_is_bytes(fn_name)
fn_name = BPF.add_prefix(b"lsm__", fn_name)
if fn_name in self.lsm_fds:
raise Exception("LSM %s has been attached" % fn_name)
fn = self.load_func(fn_name, BPF.LSM)
fd = lib.bpf_attach_lsm(fn.fd)
if fd < 0:
raise Exception("Failed to attach LSM")
self.lsm_fds[fn_name] = fd
return self
@staticmethod
def support_raw_tracepoint():
# kernel symbol "bpf_find_raw_tracepoint" indicates raw_tracepoint support
if BPF.ksymname("bpf_find_raw_tracepoint") != -1 or \
BPF.ksymname("bpf_get_raw_tracepoint") != -1:
return True
return False
@staticmethod
def support_raw_tracepoint_in_module():
# kernel symbol "bpf_trace_modules" indicates raw tp support in modules, ref: kernel commit a38d1107
kallsyms = "/proc/kallsyms"
with open(kallsyms) as syms:
for line in syms:
(_, _, name) = line.rstrip().split(" ", 2)
name = name.split("\t")[0]
if name == "bpf_trace_modules":
return True
return False
@staticmethod
def kernel_struct_has_field(struct_name, field_name):
struct_name = _assert_is_bytes(struct_name)
field_name = _assert_is_bytes(field_name)
return lib.kernel_struct_has_field(struct_name, field_name)
def detach_tracepoint(self, tp=b""):
"""detach_tracepoint(tp="")
Stop running a bpf function that is attached to the kernel tracepoint
specified by 'tp'.
Example: bpf.detach_tracepoint("sched:sched_switch")
"""
tp = _assert_is_bytes(tp)
if tp not in self.tracepoint_fds:
raise Exception("Tracepoint %s is not attached" % tp)
res = lib.bpf_close_perf_event_fd(self.tracepoint_fds[tp])
if res < 0:
raise Exception("Failed to detach BPF from tracepoint")
(tp_category, tp_name) = tp.split(b':')
res = lib.bpf_detach_tracepoint(tp_category, tp_name)
if res < 0:
raise Exception("Failed to detach BPF from tracepoint")
del self.tracepoint_fds[tp]
def _attach_perf_event(self, progfd, ev_type, ev_config,
sample_period, sample_freq, pid, cpu, group_fd):
res = lib.bpf_attach_perf_event(progfd, ev_type, ev_config,
sample_period, sample_freq, pid, cpu, group_fd)
if res < 0:
raise Exception("Failed to attach BPF to perf event")
return res
def attach_perf_event(self, ev_type=-1, ev_config=-1, fn_name=b"",
sample_period=0, sample_freq=0, pid=-1, cpu=-1, group_fd=-1):
fn_name = _assert_is_bytes(fn_name)
fn = self.load_func(fn_name, BPF.PERF_EVENT)
res = {}
if cpu >= 0:
res[cpu] = self._attach_perf_event(fn.fd, ev_type, ev_config,
sample_period, sample_freq, pid, cpu, group_fd)
else:
for i in get_online_cpus():
res[i] = self._attach_perf_event(fn.fd, ev_type, ev_config,
sample_period, sample_freq, pid, i, group_fd)
self.open_perf_events[(ev_type, ev_config)] = res
def _attach_perf_event_raw(self, progfd, attr, pid, cpu, group_fd):
res = lib.bpf_attach_perf_event_raw(progfd, ct.byref(attr), pid,
cpu, group_fd, 0)
if res < 0:
raise Exception("Failed to attach BPF to perf raw event")
return res
def attach_perf_event_raw(self, attr=-1, fn_name=b"", pid=-1, cpu=-1, group_fd=-1):
fn_name = _assert_is_bytes(fn_name)
fn = self.load_func(fn_name, BPF.PERF_EVENT)
res = {}
if cpu >= 0:
res[cpu] = self._attach_perf_event_raw(fn.fd, attr,
pid, cpu, group_fd)
else:
for i in get_online_cpus():
res[i] = self._attach_perf_event_raw(fn.fd, attr,
pid, i, group_fd)
self.open_perf_events[(attr.type, attr.config)] = res
def detach_perf_event(self, ev_type=-1, ev_config=-1):
try:
fds = self.open_perf_events[(ev_type, ev_config)]
except KeyError:
raise Exception("Perf event type {} config {} not attached".format(
ev_type, ev_config))
res = 0
for fd in fds.values():
res = lib.bpf_close_perf_event_fd(fd) or res
if res != 0:
raise Exception("Failed to detach BPF from perf event")
del self.open_perf_events[(ev_type, ev_config)]
@staticmethod
def get_user_functions(name, sym_re):
return set([name for (name, _) in
BPF.get_user_functions_and_addresses(name, sym_re)])
@staticmethod
def get_user_addresses(name, sym_re):
"""
We are returning addresses here instead of symbol names because it
turns out that the same name may appear multiple times with different
addresses, and the same address may appear multiple times with the same
name. We can't attach a uprobe to the same address more than once, so
it makes sense to return the unique set of addresses that are mapped to
a symbol that matches the provided regular expression.
"""
return set([address for (_, address) in
BPF.get_user_functions_and_addresses(name, sym_re)])
@staticmethod
def get_user_functions_and_addresses(name, sym_re):
name = _assert_is_bytes(name)
sym_re = _assert_is_bytes(sym_re)
addresses = []
def sym_cb(sym_name, addr):
dname = sym_name
if re.match(sym_re, dname):
addresses.append((dname, addr))
return 0
res = lib.bcc_foreach_function_symbol(name, _SYM_CB_TYPE(sym_cb))
if res < 0:
raise Exception("Error %d enumerating symbols in %s" % (res, name))
return addresses
def _get_uprobe_evname(self, prefix, path, addr, pid):
if pid == -1:
return b"%s_%s_0x%x" % (prefix, self._probe_repl.sub(b"_", path), addr)
else:
            # if pid is valid, include it in the event name so that
            # different pids get different event names
return b"%s_%s_0x%x_%d" % (prefix, self._probe_repl.sub(b"_", path), addr, pid)
def attach_uprobe(self, name=b"", sym=b"", sym_re=b"", addr=None,
fn_name=b"", pid=-1, sym_off=0):
"""attach_uprobe(name="", sym="", sym_re="", addr=None, fn_name=""
pid=-1, sym_off=0)
Run the bpf function denoted by fn_name every time the symbol sym in
the library or binary 'name' is encountered. Optional parameters pid,
cpu, and group_fd can be used to filter the probe.
If sym_off is given, attach uprobe to offset within the symbol.
The real address addr may be supplied in place of sym, in which case sym
must be set to its default value. If the file is a non-PIE executable,
addr must be a virtual address, otherwise it must be an offset relative
to the file load address.
Instead of a symbol name, a regular expression can be provided in
sym_re. The uprobe will then attach to symbols that match the provided
regular expression.
Libraries can be given in the name argument without the lib prefix, or
with the full path (/usr/lib/...). Binaries can be given only with the
full path (/bin/sh). If a PID is given, the uprobe will attach to the
version of the library used by the process.
Example: BPF(text).attach_uprobe("c", "malloc")
BPF(text).attach_uprobe("/usr/bin/python", "main")
"""
assert sym_off >= 0
if addr is not None:
assert sym_off == 0, "offset with addr is not supported"
name = _assert_is_bytes(name)
sym = _assert_is_bytes(sym)
sym_re = _assert_is_bytes(sym_re)
fn_name = _assert_is_bytes(fn_name)
if sym_re:
addresses = BPF.get_user_addresses(name, sym_re)
self._check_probe_quota(len(addresses))
for sym_addr in addresses:
self.attach_uprobe(name=name, addr=sym_addr,
fn_name=fn_name, pid=pid)
return
(path, addr) = BPF._check_path_symbol(name, sym, addr, pid, sym_off)
self._check_probe_quota(1)
fn = self.load_func(fn_name, BPF.KPROBE)
ev_name = self._get_uprobe_evname(b"p", path, addr, pid)
fd = lib.bpf_attach_uprobe(fn.fd, 0, ev_name, path, addr, pid)
if fd < 0:
raise Exception("Failed to attach BPF to uprobe")
self._add_uprobe_fd(ev_name, fd)
return self
def attach_uretprobe(self, name=b"", sym=b"", sym_re=b"", addr=None,
fn_name=b"", pid=-1):
"""attach_uretprobe(name="", sym="", sym_re="", addr=None, fn_name=""
pid=-1)
Run the bpf function denoted by fn_name every time the symbol sym in
the library or binary 'name' finishes execution. See attach_uprobe for
meaning of additional parameters.
"""
name = _assert_is_bytes(name)
sym = _assert_is_bytes(sym)
sym_re = _assert_is_bytes(sym_re)
fn_name = _assert_is_bytes(fn_name)
if sym_re:
for sym_addr in BPF.get_user_addresses(name, sym_re):
self.attach_uretprobe(name=name, addr=sym_addr,
fn_name=fn_name, pid=pid)
return
(path, addr) = BPF._check_path_symbol(name, sym, addr, pid)
self._check_probe_quota(1)
fn = self.load_func(fn_name, BPF.KPROBE)
ev_name = self._get_uprobe_evname(b"r", path, addr, pid)
fd = lib.bpf_attach_uprobe(fn.fd, 1, ev_name, path, addr, pid)
if fd < 0:
raise Exception("Failed to attach BPF to uretprobe")
self._add_uprobe_fd(ev_name, fd)
return self
def detach_uprobe_event(self, ev_name):
if ev_name not in self.uprobe_fds:
raise Exception("Uprobe %s is not attached" % ev_name)
res = lib.bpf_close_perf_event_fd(self.uprobe_fds[ev_name])
if res < 0:
raise Exception("Failed to detach BPF from uprobe")
res = lib.bpf_detach_uprobe(ev_name)
if res < 0:
raise Exception("Failed to detach BPF from uprobe")
self._del_uprobe_fd(ev_name)
def detach_uprobe(self, name=b"", sym=b"", addr=None, pid=-1, sym_off=0):
"""detach_uprobe(name="", sym="", addr=None, pid=-1)
Stop running a bpf function that is attached to symbol 'sym' in library
or binary 'name'.
"""
name = _assert_is_bytes(name)
sym = _assert_is_bytes(sym)
(path, addr) = BPF._check_path_symbol(name, sym, addr, pid, sym_off)
ev_name = self._get_uprobe_evname(b"p", path, addr, pid)
self.detach_uprobe_event(ev_name)
def detach_uretprobe(self, name=b"", sym=b"", addr=None, pid=-1):
"""detach_uretprobe(name="", sym="", addr=None, pid=-1)
Stop running a bpf function that is attached to symbol 'sym' in library
or binary 'name'.
"""
name = _assert_is_bytes(name)
sym = _assert_is_bytes(sym)
(path, addr) = BPF._check_path_symbol(name, sym, addr, pid)
ev_name = self._get_uprobe_evname(b"r", path, addr, pid)
self.detach_uprobe_event(ev_name)
def _trace_autoload(self):
for i in range(0, lib.bpf_num_functions(self.module)):
func_name = lib.bpf_function_name(self.module, i)
if func_name.startswith(b"kprobe__"):
fn = self.load_func(func_name, BPF.KPROBE)
self.attach_kprobe(
event=self.fix_syscall_fnname(func_name[8:]),
fn_name=fn.name)
elif func_name.startswith(b"kretprobe__"):
fn = self.load_func(func_name, BPF.KPROBE)
self.attach_kretprobe(
event=self.fix_syscall_fnname(func_name[11:]),
fn_name=fn.name)
elif func_name.startswith(b"tracepoint__"):
fn = self.load_func(func_name, BPF.TRACEPOINT)
tp = fn.name[len(b"tracepoint__"):].replace(b"__", b":")
self.attach_tracepoint(tp=tp, fn_name=fn.name)
elif func_name.startswith(b"raw_tracepoint__"):
fn = self.load_func(func_name, BPF.RAW_TRACEPOINT)
tp = fn.name[len(b"raw_tracepoint__"):]
self.attach_raw_tracepoint(tp=tp, fn_name=fn.name)
elif func_name.startswith(b"kfunc__"):
self.attach_kfunc(fn_name=func_name)
elif func_name.startswith(b"kretfunc__"):
self.attach_kretfunc(fn_name=func_name)
elif func_name.startswith(b"lsm__"):
self.attach_lsm(fn_name=func_name)
def trace_open(self, nonblocking=False):
"""trace_open(nonblocking=False)
Open the trace_pipe if not already open
"""
if not self.tracefile:
self.tracefile = open("%s/trace_pipe" % TRACEFS, "rb")
if nonblocking:
fd = self.tracefile.fileno()
fl = fcntl.fcntl(fd, fcntl.F_GETFL)
fcntl.fcntl(fd, fcntl.F_SETFL, fl | os.O_NONBLOCK)
return self.tracefile
def trace_fields(self, nonblocking=False):
"""trace_fields(nonblocking=False)
        Read from the kernel debug trace pipe and return a tuple of the
        fields (task, pid, cpu, flags, timestamp, msg), or a tuple of six
        None values if no line was read (nonblocking=True).
"""
while True:
line = self.trace_readline(nonblocking)
if not line and nonblocking: return (None,) * 6
# don't print messages related to lost events
if line.startswith(b"CPU:"): continue
task = line[:16].lstrip()
line = line[17:]
ts_end = line.find(b":")
try:
pid, cpu, flags, ts = line[:ts_end].split()
except Exception as e:
continue
cpu = cpu[1:-1]
# line[ts_end:] will have ": [sym_or_addr]: msgs"
# For trace_pipe debug output, the addr typically
# is invalid (e.g., 0x1). For kernel 4.12 or earlier,
# if address is not able to match a kernel symbol,
# nothing will be printed out. For kernel 4.13 and later,
# however, the illegal address will be printed out.
# Hence, both cases are handled here.
line = line[ts_end + 1:]
sym_end = line.find(b":")
msg = line[sym_end + 2:]
try:
return (task, int(pid), int(cpu), flags, float(ts), msg)
except Exception as e:
return ("Unknown", 0, 0, "Unknown", 0.0, "Unknown")
def trace_readline(self, nonblocking=False):
"""trace_readline(nonblocking=False)
Read from the kernel debug trace pipe and return one line
If nonblocking is False, this will block until ctrl-C is pressed.
"""
trace = self.trace_open(nonblocking)
line = None
try:
line = trace.readline(1024).rstrip()
except IOError:
pass
return line
def trace_print(self, fmt=None):
"""trace_print(self, fmt=None)
Read from the kernel debug trace pipe and print on stdout.
If fmt is specified, apply as a format string to the output. See
trace_fields for the members of the tuple
example: trace_print(fmt="pid {1}, msg = {5}")
"""
while True:
if fmt:
fields = self.trace_fields(nonblocking=False)
if not fields: continue
line = fmt.format(*fields)
else:
line = self.trace_readline(nonblocking=False)
print(line)
sys.stdout.flush()
@staticmethod
def _sym_cache(pid):
"""_sym_cache(pid)
Returns a symbol cache for the specified PID.
The kernel symbol cache is accessed by providing any PID less than zero.
"""
if pid < 0 and pid != -1:
pid = -1
        if pid not in BPF._sym_caches:
BPF._sym_caches[pid] = SymbolCache(pid)
return BPF._sym_caches[pid]
@staticmethod
def sym(addr, pid, show_module=False, show_offset=False, demangle=True):
"""sym(addr, pid, show_module=False, show_offset=False)
Translate a memory address into a function name for a pid, which is
returned. When show_module is True, the module name is also included.
When show_offset is True, the instruction offset as a hexadecimal
number is also included in the string.
A pid of less than zero will access the kernel symbol cache.
Example output when both show_module and show_offset are True:
"start_thread+0x202 [libpthread-2.24.so]"
Example output when both show_module and show_offset are False:
"start_thread"
"""
        # If addr is of type bpf_stack_build_id, invoke the build-id based
        # symbol resolver (bsymcache) instead of the regular symbol cache.
typeofaddr = str(type(addr))
if typeofaddr.find('bpf_stack_build_id') != -1:
sym = bcc_symbol()
b = bcc_stacktrace_build_id()
b.status = addr.status
b.build_id = addr.build_id
b.u.offset = addr.offset
res = lib.bcc_buildsymcache_resolve(BPF._bsymcache,
ct.byref(b),
ct.byref(sym))
if res < 0:
if sym.module and sym.offset:
name,offset,module = (None, sym.offset,
ct.cast(sym.module, ct.c_char_p).value)
else:
name, offset, module = (None, addr, None)
else:
name, offset, module = (sym.name, sym.offset,
ct.cast(sym.module, ct.c_char_p).value)
else:
name, offset, module = BPF._sym_cache(pid).resolve(addr, demangle)
offset = b"+0x%x" % offset if show_offset and name is not None else b""
name = name or b"[unknown]"
name = name + offset
module = b" [%s]" % os.path.basename(module) \
if show_module and module is not None else b""
return name + module
@staticmethod
def ksym(addr, show_module=False, show_offset=False):
"""ksym(addr)
Translate a kernel memory address into a kernel function name, which is
returned. When show_module is True, the module name ("kernel") is also
included. When show_offset is true, the instruction offset as a
hexadecimal number is also included in the string.
Example output when both show_module and show_offset are True:
"default_idle+0x0 [kernel]"
"""
return BPF.sym(addr, -1, show_module, show_offset, False)
@staticmethod
def ksymname(name):
"""ksymname(name)
Translate a kernel name into an address. This is the reverse of
ksym. Returns -1 when the function name is unknown."""
return BPF._sym_cache(-1).resolve_name(None, name)
def num_open_kprobes(self):
"""num_open_kprobes()
Get the number of open K[ret]probes. Can be useful for scenarios where
event_re is used while attaching and detaching probes.
"""
return len(self.kprobe_fds)
def num_open_uprobes(self):
"""num_open_uprobes()
Get the number of open U[ret]probes.
"""
return len(self.uprobe_fds)
def num_open_tracepoints(self):
"""num_open_tracepoints()
Get the number of open tracepoints.
"""
return len(self.tracepoint_fds)
def perf_buffer_poll(self, timeout = -1):
"""perf_buffer_poll(self)
Poll from all open perf ring buffers, calling the callback that was
provided when calling open_perf_buffer for each entry.
"""
readers = (ct.c_void_p * len(self.perf_buffers))()
for i, v in enumerate(self.perf_buffers.values()):
readers[i] = v
lib.perf_reader_poll(len(readers), readers, timeout)
def kprobe_poll(self, timeout = -1):
"""kprobe_poll(self)
Deprecated. Use perf_buffer_poll instead.
"""
self.perf_buffer_poll(timeout)
def _open_ring_buffer(self, map_fd, fn, ctx=None):
if not self._ringbuf_manager:
self._ringbuf_manager = lib.bpf_new_ringbuf(map_fd, fn, ctx)
if not self._ringbuf_manager:
raise Exception("Could not open ring buffer")
else:
ret = lib.bpf_add_ringbuf(self._ringbuf_manager, map_fd, fn, ctx)
if ret < 0:
raise Exception("Could not open ring buffer")
def ring_buffer_poll(self, timeout = -1):
"""ring_buffer_poll(self)
Poll from all open ringbuf buffers, calling the callback that was
provided when calling open_ring_buffer for each entry.
"""
if not self._ringbuf_manager:
raise Exception("No ring buffers to poll")
lib.bpf_poll_ringbuf(self._ringbuf_manager, timeout)
def ring_buffer_consume(self):
"""ring_buffer_consume(self)
        Consume all open ringbuf buffers, regardless of whether or not
        they currently contain event data. This is best for use cases
where low latency is desired, but it can impact performance.
If you are unsure, use ring_buffer_poll instead.
"""
if not self._ringbuf_manager:
raise Exception("No ring buffers to poll")
lib.bpf_consume_ringbuf(self._ringbuf_manager)
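    # Illustrative usage sketch (not from the original source): poll a
    # BPF_RINGBUF_OUTPUT map named "events"; the callback signature matches
    # what open_ring_buffer (defined on the table object elsewhere in bcc)
    # expects, and "b" is assumed to be an existing BPF object.
    #
    #   def handle_event(ctx, data, size):
    #       event = b["events"].event(data)
    #       ...
    #
    #   b["events"].open_ring_buffer(handle_event)
    #   while True:
    #       b.ring_buffer_poll(timeout=100)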
def free_bcc_memory(self):
return lib.bcc_free_memory()
@staticmethod
def add_module(modname):
"""add_module(modname)
Add a library or exe to buildsym cache
"""
try:
lib.bcc_buildsymcache_add_module(BPF._bsymcache, modname.encode())
except Exception as e:
print("Error adding module to build sym cache"+str(e))
def donothing(self):
"""the do nothing exit handler"""
def cleanup(self):
# Clean up opened probes
for k, v in list(self.kprobe_fds.items()):
self.detach_kprobe_event(k)
for k, v in list(self.uprobe_fds.items()):
self.detach_uprobe_event(k)
for k, v in list(self.tracepoint_fds.items()):
self.detach_tracepoint(k)
for k, v in list(self.raw_tracepoint_fds.items()):
self.detach_raw_tracepoint(k)
for k, v in list(self.kfunc_entry_fds.items()):
self.detach_kfunc(k)
for k, v in list(self.kfunc_exit_fds.items()):
self.detach_kretfunc(k)
for k, v in list(self.lsm_fds.items()):
self.detach_lsm(k)
# Clean up opened perf ring buffer and perf events
table_keys = list(self.tables.keys())
for key in table_keys:
if isinstance(self.tables[key], PerfEventArray):
del self.tables[key]
for (ev_type, ev_config) in list(self.open_perf_events.keys()):
self.detach_perf_event(ev_type, ev_config)
if self.tracefile:
self.tracefile.close()
self.tracefile = None
for name, fn in list(self.funcs.items()):
os.close(fn.fd)
del self.funcs[name]
if self.module:
lib.bpf_module_destroy(self.module)
self.module = None
# Clean up ringbuf
if self._ringbuf_manager:
lib.bpf_free_ringbuf(self._ringbuf_manager)
self._ringbuf_manager = None
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self.cleanup()
| {
"content_hash": "9deea862e420e2ffcb859208baab086e",
"timestamp": "",
"source": "github",
"line_count": 1755,
"max_line_length": 110,
"avg_line_length": 37.54188034188034,
"alnum_prop": 0.5650214006010381,
"repo_name": "brendangregg/bcc",
"id": "2ff5cf020470aa58b1bedc0b52f1bf8e4851644b",
"size": "66455",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/python/bcc/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "8274020"
},
{
"name": "C++",
"bytes": "890599"
},
{
"name": "CMake",
"bytes": "48236"
},
{
"name": "HTML",
"bytes": "2997"
},
{
"name": "Lua",
"bytes": "299473"
},
{
"name": "Makefile",
"bytes": "3214"
},
{
"name": "Python",
"bytes": "1377079"
},
{
"name": "Shell",
"bytes": "21733"
}
],
"symlink_target": ""
} |
"""Tools for working with the host system"""
# Copyright 2012 Canonical Ltd.
#
# Authors:
# Nick Moffitt <[email protected]>
# Matthew Wedgwood <[email protected]>
import os
import re
import pwd
import grp
import random
import string
import subprocess
import hashlib
from contextlib import contextmanager
from collections import OrderedDict
import six
from .hookenv import log
from .fstab import Fstab
def service_start(service_name):
"""Start a system service"""
return service('start', service_name)
def service_stop(service_name):
"""Stop a system service"""
return service('stop', service_name)
def service_restart(service_name):
"""Restart a system service"""
return service('restart', service_name)
def service_reload(service_name, restart_on_failure=False):
"""Reload a system service, optionally falling back to restart if
reload fails"""
service_result = service('reload', service_name)
if not service_result and restart_on_failure:
service_result = service('restart', service_name)
return service_result
def service(action, service_name):
"""Control a system service"""
cmd = ['service', service_name, action]
return subprocess.call(cmd) == 0
def service_running(service):
"""Determine whether a system service is running"""
try:
output = subprocess.check_output(
['service', service, 'status'],
stderr=subprocess.STDOUT).decode('UTF-8')
except subprocess.CalledProcessError:
return False
else:
if ("start/running" in output or "is running" in output):
return True
else:
return False
def service_available(service_name):
"""Determine whether a system service is available"""
try:
subprocess.check_output(
['service', service_name, 'status'],
stderr=subprocess.STDOUT).decode('UTF-8')
except subprocess.CalledProcessError as e:
return 'unrecognized service' not in e.output
else:
return True
def adduser(username, password=None, shell='/bin/bash', system_user=False):
"""Add a user to the system"""
try:
user_info = pwd.getpwnam(username)
log('user {0} already exists!'.format(username))
except KeyError:
log('creating user {0}'.format(username))
cmd = ['useradd']
if system_user or password is None:
cmd.append('--system')
else:
cmd.extend([
'--create-home',
'--shell', shell,
'--password', password,
])
cmd.append(username)
subprocess.check_call(cmd)
user_info = pwd.getpwnam(username)
return user_info
def add_group(group_name, system_group=False):
"""Add a group to the system"""
try:
group_info = grp.getgrnam(group_name)
log('group {0} already exists!'.format(group_name))
except KeyError:
log('creating group {0}'.format(group_name))
cmd = ['addgroup']
if system_group:
cmd.append('--system')
else:
cmd.extend([
'--group',
])
cmd.append(group_name)
subprocess.check_call(cmd)
group_info = grp.getgrnam(group_name)
return group_info
def add_user_to_group(username, group):
"""Add a user to a group"""
cmd = [
'gpasswd', '-a',
username,
group
]
log("Adding user {} to group {}".format(username, group))
subprocess.check_call(cmd)
def rsync(from_path, to_path, flags='-r', options=None):
"""Replicate the contents of a path"""
options = options or ['--delete', '--executability']
cmd = ['/usr/bin/rsync', flags]
cmd.extend(options)
cmd.append(from_path)
cmd.append(to_path)
log(" ".join(cmd))
return subprocess.check_output(cmd).decode('UTF-8').strip()
def symlink(source, destination):
"""Create a symbolic link"""
log("Symlinking {} as {}".format(source, destination))
cmd = [
'ln',
'-sf',
source,
destination,
]
subprocess.check_call(cmd)
def mkdir(path, owner='root', group='root', perms=0o555, force=False):
"""Create a directory"""
log("Making dir {} {}:{} {:o}".format(path, owner, group,
perms))
uid = pwd.getpwnam(owner).pw_uid
gid = grp.getgrnam(group).gr_gid
realpath = os.path.abspath(path)
path_exists = os.path.exists(realpath)
if path_exists and force:
if not os.path.isdir(realpath):
log("Removing non-directory file {} prior to mkdir()".format(path))
os.unlink(realpath)
os.makedirs(realpath, perms)
elif not path_exists:
os.makedirs(realpath, perms)
os.chown(realpath, uid, gid)
os.chmod(realpath, perms)
def write_file(path, content, owner='root', group='root', perms=0o444):
"""Create or overwrite a file with the contents of a string"""
log("Writing file {} {}:{} {:o}".format(path, owner, group, perms))
uid = pwd.getpwnam(owner).pw_uid
gid = grp.getgrnam(group).gr_gid
with open(path, 'w') as target:
os.fchown(target.fileno(), uid, gid)
os.fchmod(target.fileno(), perms)
target.write(content)
def fstab_remove(mp):
"""Remove the given mountpoint entry from /etc/fstab
"""
return Fstab.remove_by_mountpoint(mp)
def fstab_add(dev, mp, fs, options=None):
"""Adds the given device entry to the /etc/fstab file
"""
return Fstab.add(dev, mp, fs, options=options)
def mount(device, mountpoint, options=None, persist=False, filesystem="ext3"):
"""Mount a filesystem at a particular mountpoint"""
cmd_args = ['mount']
if options is not None:
cmd_args.extend(['-o', options])
cmd_args.extend([device, mountpoint])
try:
subprocess.check_output(cmd_args)
except subprocess.CalledProcessError as e:
log('Error mounting {} at {}\n{}'.format(device, mountpoint, e.output))
return False
if persist:
return fstab_add(device, mountpoint, filesystem, options=options)
return True
def umount(mountpoint, persist=False):
"""Unmount a filesystem"""
cmd_args = ['umount', mountpoint]
try:
subprocess.check_output(cmd_args)
except subprocess.CalledProcessError as e:
log('Error unmounting {}\n{}'.format(mountpoint, e.output))
return False
if persist:
return fstab_remove(mountpoint)
return True
def mounts():
"""Get a list of all mounted volumes as [[mountpoint,device],[...]]"""
with open('/proc/mounts') as f:
# [['/mount/point','/dev/path'],[...]]
system_mounts = [m[1::-1] for m in [l.strip().split()
for l in f.readlines()]]
return system_mounts
def file_hash(path, hash_type='md5'):
"""
Generate a hash checksum of the contents of 'path' or None if not found.
    :param str hash_type: Any hash algorithm supported by :mod:`hashlib`,
                          such as md5, sha1, sha256, sha512, etc.
"""
if os.path.exists(path):
h = getattr(hashlib, hash_type)()
with open(path, 'rb') as source:
h.update(source.read())
return h.hexdigest()
else:
return None
def check_hash(path, checksum, hash_type='md5'):
"""
Validate a file using a cryptographic checksum.
:param str checksum: Value of the checksum used to validate the file.
:param str hash_type: Hash algorithm used to generate `checksum`.
        Can be any hash algorithm supported by :mod:`hashlib`,
        such as md5, sha1, sha256, sha512, etc.
:raises ChecksumError: If the file fails the checksum
"""
actual_checksum = file_hash(path, hash_type)
if checksum != actual_checksum:
raise ChecksumError("'%s' != '%s'" % (checksum, actual_checksum))
class ChecksumError(ValueError):
pass
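# Illustrative usage sketch (not part of the original module): validate a
# downloaded file against a known digest; the path and checksum below are
# placeholders.
#
#   try:
#       check_hash('/tmp/example.tar.gz',
#                  'd41d8cd98f00b204e9800998ecf8427e', hash_type='md5')
#   except ChecksumError as e:
#       log('corrupt download: {}'.format(e))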
def restart_on_change(restart_map, stopstart=False):
"""Restart services based on configuration files changing
This function is used a decorator, for example::
@restart_on_change({
'/etc/ceph/ceph.conf': [ 'cinder-api', 'cinder-volume' ]
})
def ceph_client_changed():
pass # your code here
In this example, the cinder-api and cinder-volume services
would be restarted if /etc/ceph/ceph.conf is changed by the
ceph_client_changed function.
"""
def wrap(f):
def wrapped_f(*args):
checksums = {}
for path in restart_map:
checksums[path] = file_hash(path)
f(*args)
restarts = []
for path in restart_map:
if checksums[path] != file_hash(path):
restarts += restart_map[path]
services_list = list(OrderedDict.fromkeys(restarts))
if not stopstart:
for service_name in services_list:
service('restart', service_name)
else:
for action in ['stop', 'start']:
for service_name in services_list:
service(action, service_name)
return wrapped_f
return wrap
def lsb_release():
"""Return /etc/lsb-release in a dict"""
d = {}
with open('/etc/lsb-release', 'r') as lsb:
for l in lsb:
k, v = l.split('=')
d[k.strip()] = v.strip()
return d
def pwgen(length=None):
"""Generate a random pasword."""
if length is None:
length = random.choice(range(35, 45))
alphanumeric_chars = [
l for l in (string.ascii_letters + string.digits)
if l not in 'l0QD1vAEIOUaeiou']
random_chars = [
random.choice(alphanumeric_chars) for _ in range(length)]
return(''.join(random_chars))
def list_nics(nic_type):
'''Return a list of nics of given type(s)'''
if isinstance(nic_type, six.string_types):
int_types = [nic_type]
else:
int_types = nic_type
interfaces = []
for int_type in int_types:
cmd = ['ip', 'addr', 'show', 'label', int_type + '*']
ip_output = subprocess.check_output(cmd).decode('UTF-8').split('\n')
ip_output = (line for line in ip_output if line)
for line in ip_output:
if line.split()[1].startswith(int_type):
                matched = re.search(r'.*: (bond[0-9]+\.[0-9]+)@.*', line)
if matched:
interface = matched.groups()[0]
else:
interface = line.split()[1].replace(":", "")
interfaces.append(interface)
return interfaces
def set_nic_mtu(nic, mtu):
'''Set MTU on a network interface'''
cmd = ['ip', 'link', 'set', nic, 'mtu', mtu]
subprocess.check_call(cmd)
def get_nic_mtu(nic):
cmd = ['ip', 'addr', 'show', nic]
ip_output = subprocess.check_output(cmd).decode('UTF-8').split('\n')
mtu = ""
for line in ip_output:
words = line.split()
if 'mtu' in words:
mtu = words[words.index("mtu") + 1]
return mtu
def get_nic_hwaddr(nic):
cmd = ['ip', '-o', '-0', 'addr', 'show', nic]
ip_output = subprocess.check_output(cmd).decode('UTF-8')
hwaddr = ""
words = ip_output.split()
if 'link/ether' in words:
hwaddr = words[words.index('link/ether') + 1]
return hwaddr
def cmp_pkgrevno(package, revno, pkgcache=None):
'''Compare supplied revno with the revno of the installed package
* 1 => Installed revno is greater than supplied arg
* 0 => Installed revno is the same as supplied arg
* -1 => Installed revno is less than supplied arg
This function imports apt_cache function from charmhelpers.fetch if
the pkgcache argument is None. Be sure to add charmhelpers.fetch if
you call this function, or pass an apt_pkg.Cache() instance.
'''
import apt_pkg
if not pkgcache:
from charmhelpers.fetch import apt_cache
pkgcache = apt_cache()
pkg = pkgcache[package]
return apt_pkg.version_compare(pkg.current_ver.ver_str, revno)
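# Illustrative usage sketch (not from the original source): gate behaviour on
# the installed package version; the package name and version string are
# placeholders.
#
#   if cmp_pkgrevno('neutron-common', '1:2014.1') >= 0:
#       log('neutron is new enough, enabling the newer config format')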
@contextmanager
def chdir(d):
cur = os.getcwd()
try:
yield os.chdir(d)
finally:
os.chdir(cur)
def chownr(path, owner, group, follow_links=True):
uid = pwd.getpwnam(owner).pw_uid
gid = grp.getgrnam(group).gr_gid
if follow_links:
chown = os.chown
else:
chown = os.lchown
for root, dirs, files in os.walk(path):
for name in dirs + files:
full = os.path.join(root, name)
broken_symlink = os.path.lexists(full) and not os.path.exists(full)
if not broken_symlink:
chown(full, uid, gid)
def lchownr(path, owner, group):
chownr(path, owner, group, follow_links=False)
| {
"content_hash": "8550ff427210d308ab7191b809982ac0",
"timestamp": "",
"source": "github",
"line_count": 430,
"max_line_length": 79,
"avg_line_length": 29.983720930232558,
"alnum_prop": 0.5991623361514,
"repo_name": "mbirru/midonet-charms",
"id": "cf2cbe14ec08acfe1b846db4972a3d95e05444ea",
"size": "13571",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "neutron-api/hooks/charmhelpers/core/host.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "ApacheConf",
"bytes": "5315"
},
{
"name": "Makefile",
"bytes": "565"
},
{
"name": "Python",
"bytes": "1120549"
},
{
"name": "Shell",
"bytes": "738"
}
],
"symlink_target": ""
} |
"""Operations for linear algebra."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import gen_linalg_ops
from tensorflow.python.ops import math_ops
# pylint: disable=wildcard-import
from tensorflow.python.ops.gen_linalg_ops import *
# pylint: enable=wildcard-import
from tensorflow.python.util import compat
from tensorflow.python.util import deprecation
from tensorflow.python.util.tf_export import tf_export
# Names below are lower_case.
# pylint: disable=invalid-name
def _RegularizedGramianCholesky(matrix, l2_regularizer, first_kind):
r"""Computes Cholesky factorization of regularized gramian matrix.
Below we will use the following notation for each pair of matrix and
right-hand sides in the batch:
`matrix`=\\(A \in \Re^{m \times n}\\),
`output`=\\(C \in \Re^{\min(m, n) \times \min(m,n)}\\),
`l2_regularizer`=\\(\lambda\\).
If `first_kind` is True, returns the Cholesky factorization \\(L\\) such that
\\(L L^H = A^H A + \lambda I\\).
If `first_kind` is False, returns the Cholesky factorization \\(L\\) such that
\\(L L^H = A A^H + \lambda I\\).
Args:
matrix: `Tensor` of shape `[..., M, N]`.
    l2_regularizer: 0-D `double` `Tensor`. The regularization term added to
      the gramian; a Python scalar 0 disables regularization.
first_kind: bool. Controls what gramian matrix to factor.
Returns:
output: `Tensor` of shape `[..., min(M,N), min(M,N)]` whose inner-most 2
dimensions contain the Cholesky factors \\(L\\) described above.
"""
gramian = math_ops.matmul(
matrix, matrix, adjoint_a=first_kind, adjoint_b=not first_kind)
if isinstance(l2_regularizer, ops.Tensor) or l2_regularizer != 0:
matrix_shape = array_ops.shape(matrix)
batch_shape = matrix_shape[:-2]
if first_kind:
small_dim = matrix_shape[-1]
else:
small_dim = matrix_shape[-2]
identity = eye(small_dim, batch_shape=batch_shape, dtype=matrix.dtype)
small_dim_static = matrix.shape[-1 if first_kind else -2]
identity.set_shape(
matrix.shape[:-2].concatenate([small_dim_static, small_dim_static]))
gramian += l2_regularizer * identity
return gen_linalg_ops.cholesky(gramian)
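# Hedged sketch, not part of the TensorFlow source: for a single (unbatched)
# real matrix, the factor returned above can be sanity-checked by rebuilding
# the regularized gramian from it. The helper name below is illustrative only.
def _regularized_gramian_cholesky_residual(matrix, l2_regularizer=0.0):
  """Returns L L^H - (A^H A + l2_regularizer * I), which should be ~0."""
  chol = _RegularizedGramianCholesky(
      matrix, l2_regularizer=l2_regularizer, first_kind=True)
  gramian = math_ops.matmul(matrix, matrix, adjoint_a=True)
  identity = eye(array_ops.shape(matrix)[-1], dtype=matrix.dtype)
  return math_ops.matmul(chol, chol, adjoint_b=True) - (
      gramian + l2_regularizer * identity)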
@tf_export('cholesky_solve', 'linalg.cholesky_solve')
def cholesky_solve(chol, rhs, name=None):
"""Solves systems of linear eqns `A X = RHS`, given Cholesky factorizations.
```python
# Solve 10 separate 2x2 linear systems:
A = ... # shape 10 x 2 x 2
RHS = ... # shape 10 x 2 x 1
chol = tf.cholesky(A) # shape 10 x 2 x 2
X = tf.cholesky_solve(chol, RHS) # shape 10 x 2 x 1
# tf.matmul(A, X) ~ RHS
X[3, :, 0] # Solution to the linear system A[3, :, :] x = RHS[3, :, 0]
# Solve five linear systems (K = 5) for every member of the length 10 batch.
A = ... # shape 10 x 2 x 2
RHS = ... # shape 10 x 2 x 5
...
X[3, :, 2] # Solution to the linear system A[3, :, :] x = RHS[3, :, 2]
```
Args:
chol: A `Tensor`. Must be `float32` or `float64`, shape is `[..., M, M]`.
Cholesky factorization of `A`, e.g. `chol = tf.cholesky(A)`.
For that reason, only the lower triangular parts (including the diagonal)
of the last two dimensions of `chol` are used. The strictly upper part is
assumed to be zero and not accessed.
rhs: A `Tensor`, same type as `chol`, shape is `[..., M, K]`.
name: A name to give this `Op`. Defaults to `cholesky_solve`.
Returns:
Solution to `A x = rhs`, shape `[..., M, K]`.
"""
# To solve C C^* x = rhs, we
# 1. Solve C y = rhs for y, thus y = C^* x
# 2. Solve C^* x = y for x
with ops.name_scope(name, 'cholesky_solve', [chol, rhs]):
y = gen_linalg_ops.matrix_triangular_solve(
chol, rhs, adjoint=False, lower=True)
x = gen_linalg_ops.matrix_triangular_solve(
chol, y, adjoint=True, lower=True)
return x
@tf_export('eye', 'linalg.eye')
def eye(num_rows,
num_columns=None,
batch_shape=None,
dtype=dtypes.float32,
name=None):
"""Construct an identity matrix, or a batch of matrices.
```python
# Construct one identity matrix.
tf.eye(2)
==> [[1., 0.],
[0., 1.]]
  # Construct a batch of 3 identity matrices, each 2 x 2.
# batch_identity[i, :, :] is a 2 x 2 identity matrix, i = 0, 1, 2.
batch_identity = tf.eye(2, batch_shape=[3])
# Construct one 2 x 3 "identity" matrix
tf.eye(2, num_columns=3)
==> [[ 1., 0., 0.],
[ 0., 1., 0.]]
```
Args:
num_rows: Non-negative `int32` scalar `Tensor` giving the number of rows
in each batch matrix.
num_columns: Optional non-negative `int32` scalar `Tensor` giving the number
of columns in each batch matrix. Defaults to `num_rows`.
batch_shape: A list or tuple of Python integers or a 1-D `int32` `Tensor`.
If provided, the returned `Tensor` will have leading batch dimensions of
this shape.
dtype: The type of an element in the resulting `Tensor`
name: A name for this `Op`. Defaults to "eye".
Returns:
A `Tensor` of shape `batch_shape + [num_rows, num_columns]`
"""
with ops.name_scope(
name, default_name='eye', values=[num_rows, num_columns, batch_shape]):
is_square = num_columns is None
batch_shape = [] if batch_shape is None else batch_shape
num_columns = num_rows if num_columns is None else num_columns
if isinstance(num_rows, ops.Tensor) or isinstance(
num_columns, ops.Tensor) or isinstance(batch_shape, ops.Tensor):
batch_shape = ops.convert_to_tensor(
batch_shape, name='shape', dtype=dtypes.int32)
diag_size = math_ops.minimum(num_rows, num_columns)
diag_shape = array_ops.concat((batch_shape, [diag_size]), 0)
if not is_square:
shape = array_ops.concat((batch_shape, [num_rows, num_columns]), 0)
else:
if not isinstance(num_rows, compat.integral_types) or not isinstance(
num_columns, compat.integral_types):
raise TypeError(
'num_rows and num_columns must be positive integer values.')
batch_shape = [dim for dim in batch_shape]
is_square = num_rows == num_columns
diag_shape = batch_shape + [np.minimum(num_rows, num_columns)]
if not is_square:
shape = batch_shape + [num_rows, num_columns]
diag_ones = array_ops.ones(diag_shape, dtype=dtype)
if is_square:
return array_ops.matrix_diag(diag_ones)
else:
zero_matrix = array_ops.zeros(shape, dtype=dtype)
return array_ops.matrix_set_diag(zero_matrix, diag_ones)
@tf_export('matrix_solve_ls', 'linalg.lstsq')
def matrix_solve_ls(matrix, rhs, l2_regularizer=0.0, fast=True, name=None):
r"""Solves one or more linear least-squares problems.
`matrix` is a tensor of shape `[..., M, N]` whose inner-most 2 dimensions
form `M`-by-`N` matrices. Rhs is a tensor of shape `[..., M, K]` whose
inner-most 2 dimensions form `M`-by-`K` matrices. The computed output is a
`Tensor` of shape `[..., N, K]` whose inner-most 2 dimensions form `M`-by-`K`
matrices that solve the equations
`matrix[..., :, :] * output[..., :, :] = rhs[..., :, :]` in the least squares
sense.
Below we will use the following notation for each pair of matrix and
right-hand sides in the batch:
`matrix`=\\(A \in \Re^{m \times n}\\),
`rhs`=\\(B \in \Re^{m \times k}\\),
`output`=\\(X \in \Re^{n \times k}\\),
`l2_regularizer`=\\(\lambda\\).
If `fast` is `True`, then the solution is computed by solving the normal
equations using Cholesky decomposition. Specifically, if \\(m \ge n\\) then
\\(X = (A^T A + \lambda I)^{-1} A^T B\\), which solves the least-squares
problem \\(X = \mathrm{argmin}_{Z \in \Re^{n \times k}} ||A Z - B||_F^2 +
\lambda ||Z||_F^2\\). If \\(m \lt n\\) then `output` is computed as
\\(X = A^T (A A^T + \lambda I)^{-1} B\\), which (for \\(\lambda = 0\\)) is
the minimum-norm solution to the under-determined linear system, i.e.
\\(X = \mathrm{argmin}_{Z \in \Re^{n \times k}} ||Z||_F^2 \\), subject to
\\(A Z = B\\). Notice that the fast path is only numerically stable when
\\(A\\) is numerically full rank and has a condition number
  \\(\mathrm{cond}(A) \lt \frac{1}{\sqrt{\epsilon_{mach}}}\\) or \\(\lambda\\)
is sufficiently large.
If `fast` is `False` an algorithm based on the numerically robust complete
orthogonal decomposition is used. This computes the minimum-norm
least-squares solution, even when \\(A\\) is rank deficient. This path is
typically 6-7 times slower than the fast path. If `fast` is `False` then
`l2_regularizer` is ignored.
Args:
matrix: `Tensor` of shape `[..., M, N]`.
rhs: `Tensor` of shape `[..., M, K]`.
l2_regularizer: 0-D `double` `Tensor`. Ignored if `fast=False`.
fast: bool. Defaults to `True`.
name: string, optional name of the operation.
Returns:
output: `Tensor` of shape `[..., N, K]` whose inner-most 2 dimensions form
`M`-by-`K` matrices that solve the equations
`matrix[..., :, :] * output[..., :, :] = rhs[..., :, :]` in the least
squares sense.
Raises:
NotImplementedError: matrix_solve_ls is currently disabled for complex128
and l2_regularizer != 0 due to poor accuracy.
"""
# pylint: disable=protected-access,long-lambda
def _use_composite_impl(fast, tensor_shape):
"""Determines whether to use the composite or specialized CPU kernel.
When the total size of the tensor is larger than the cache size and the
batch size is large compared to the smallest matrix dimension, then the
composite implementation is inefficient since it has to read the entire
tensor from memory multiple times. In this case we fall back to the
original CPU kernel, which does all the computational steps on each
matrix separately.
Only fast mode is supported by the composite impl, so `False` is returned
if `fast` is `False`.
Args:
fast: bool indicating if fast mode in the solver was requested.
tensor_shape: The shape of the tensor.
Returns:
True if the composite impl should be used. False otherwise.
"""
if fast is False:
return False
batch_shape = tensor_shape[:-2]
matrix_shape = tensor_shape[-2:]
if not tensor_shape.is_fully_defined():
return True
tensor_size = tensor_shape.num_elements() * matrix.dtype.size
is_io_bound = batch_shape.num_elements() > np.min(matrix_shape)
L2_CACHE_SIZE_GUESSTIMATE = 256000
if tensor_size > L2_CACHE_SIZE_GUESSTIMATE and is_io_bound:
return False
else:
return True
def _overdetermined(matrix, rhs, l2_regularizer):
"""Computes (A^H*A + l2_regularizer)^{-1} * A^H * rhs."""
chol = _RegularizedGramianCholesky(
matrix, l2_regularizer=l2_regularizer, first_kind=True)
return cholesky_solve(chol, math_ops.matmul(matrix, rhs, adjoint_a=True))
def _underdetermined(matrix, rhs, l2_regularizer):
"""Computes A^H * (A*A^H + l2_regularizer)^{-1} * rhs."""
chol = _RegularizedGramianCholesky(
matrix, l2_regularizer=l2_regularizer, first_kind=False)
return math_ops.matmul(matrix, cholesky_solve(chol, rhs), adjoint_a=True)
def _composite_impl(matrix, rhs, l2_regularizer):
"""Composite implementation of matrix_solve_ls that supports GPU."""
with ops.name_scope(name, 'matrix_solve_ls', [matrix, rhs, l2_regularizer]):
matrix_shape = matrix.get_shape()[-2:]
if matrix_shape.is_fully_defined():
if matrix_shape[-2] >= matrix_shape[-1]:
return _overdetermined(matrix, rhs, l2_regularizer)
else:
return _underdetermined(matrix, rhs, l2_regularizer)
else:
# We have to defer determining the shape to runtime and use
# conditional execution of the appropriate graph.
matrix_shape = array_ops.shape(matrix)[-2:]
return control_flow_ops.cond(
matrix_shape[-2] >= matrix_shape[-1],
lambda: _overdetermined(matrix, rhs, l2_regularizer),
lambda: _underdetermined(matrix, rhs, l2_regularizer))
matrix = ops.convert_to_tensor(matrix, name='matrix')
if matrix.dtype == dtypes.complex128 and l2_regularizer != 0:
# TODO(rmlarsen): Investigate and fix accuracy bug.
raise NotImplementedError('matrix_solve_ls is currently disabled for '
'complex128 and l2_regularizer != 0 due to '
'poor accuracy.')
tensor_shape = matrix.get_shape()
if _use_composite_impl(fast, tensor_shape):
return _composite_impl(matrix, rhs, l2_regularizer)
else:
return gen_linalg_ops._matrix_solve_ls(
matrix, rhs, l2_regularizer, fast=fast, name=name)
# pylint: enable=protected-access
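# Hedged usage sketch, not part of the TensorFlow source: a small
# overdetermined least-squares problem solved with both code paths. The
# matrices and the helper name below are illustrative only.
def _matrix_solve_ls_example():
  a = ops.convert_to_tensor([[1., 0.], [0., 1.], [1., 1.]])  # shape [3, 2]
  b = ops.convert_to_tensor([[1.], [2.], [2.]])              # shape [3, 1]
  # Fast path: normal equations solved via Cholesky (optionally regularized).
  x_fast = matrix_solve_ls(a, b, l2_regularizer=0.0, fast=True)
  # Robust path: complete orthogonal decomposition; l2_regularizer is ignored.
  x_robust = matrix_solve_ls(a, b, fast=False)
  return x_fast, x_robust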
@tf_export('self_adjoint_eig', 'linalg.eigh')
def self_adjoint_eig(tensor, name=None):
"""Computes the eigen decomposition of a batch of self-adjoint matrices.
Computes the eigenvalues and eigenvectors of the innermost N-by-N matrices
in `tensor` such that
`tensor[...,:,:] * v[..., :,i] = e[..., i] * v[...,:,i]`, for i=0...N-1.
Args:
tensor: `Tensor` of shape `[..., N, N]`. Only the lower triangular part of
      each inner matrix is referenced.
name: string, optional name of the operation.
Returns:
e: Eigenvalues. Shape is `[..., N]`.
v: Eigenvectors. Shape is `[..., N, N]`. The columns of the inner most
matrices contain eigenvectors of the corresponding matrices in `tensor`
"""
# pylint: disable=protected-access
e, v = gen_linalg_ops._self_adjoint_eig_v2(tensor, compute_v=True, name=name)
return e, v
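# Hedged sketch, not part of the TensorFlow source: for a real symmetric (or
# hermitian) input, the decomposition above can be sanity-checked by rebuilding
# the matrix as V diag(e) V^H. The helper name below is illustrative only.
def _self_adjoint_eig_reconstruction(tensor):
  """Returns V diag(e) V^H, which should be close to `tensor`."""
  e, v = self_adjoint_eig(tensor)
  # Scale the columns of V by the corresponding eigenvalues, then apply V^H.
  return math_ops.matmul(v * array_ops.expand_dims(e, -2), v, adjoint_b=True)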
@tf_export('self_adjoint_eigvals', 'linalg.eigvalsh')
def self_adjoint_eigvals(tensor, name=None):
"""Computes the eigenvalues of one or more self-adjoint matrices.
Note: If your program backpropagates through this function, you should replace
it with a call to tf.self_adjoint_eig (possibly ignoring the second output) to
avoid computing the eigen decomposition twice. This is because the
eigenvectors are used to compute the gradient w.r.t. the eigenvalues. See
_SelfAdjointEigV2Grad in linalg_grad.py.
Args:
tensor: `Tensor` of shape `[..., N, N]`.
name: string, optional name of the operation.
Returns:
e: Eigenvalues. Shape is `[..., N]`. The vector `e[..., :]` contains the `N`
eigenvalues of `tensor[..., :, :]`.
"""
# pylint: disable=protected-access
e, _ = gen_linalg_ops._self_adjoint_eig_v2(tensor, compute_v=False, name=name)
return e
@tf_export('svd', 'linalg.svd')
def svd(tensor, full_matrices=False, compute_uv=True, name=None):
r"""Computes the singular value decompositions of one or more matrices.
Computes the SVD of each inner matrix in `tensor` such that
`tensor[..., :, :] = u[..., :, :] * diag(s[..., :, :]) *
transpose(conj(v[..., :, :]))`
```python
# a is a tensor.
# s is a tensor of singular values.
# u is a tensor of left singular vectors.
# v is a tensor of right singular vectors.
s, u, v = svd(a)
s = svd(a, compute_uv=False)
```
Args:
tensor: `Tensor` of shape `[..., M, N]`. Let `P` be the minimum of `M` and
`N`.
full_matrices: If true, compute full-sized `u` and `v`. If false
(the default), compute only the leading `P` singular vectors.
Ignored if `compute_uv` is `False`.
compute_uv: If `True` then left and right singular vectors will be
computed and returned in `u` and `v`, respectively. Otherwise, only the
singular values will be computed, which can be significantly faster.
name: string, optional name of the operation.
Returns:
s: Singular values. Shape is `[..., P]`. The values are sorted in reverse
order of magnitude, so s[..., 0] is the largest value, s[..., 1] is the
second largest, etc.
u: Left singular vectors. If `full_matrices` is `False` (default) then
shape is `[..., M, P]`; if `full_matrices` is `True` then shape is
`[..., M, M]`. Not returned if `compute_uv` is `False`.
v: Right singular vectors. If `full_matrices` is `False` (default) then
shape is `[..., N, P]`. If `full_matrices` is `True` then shape is
`[..., N, N]`. Not returned if `compute_uv` is `False`.
@compatibility(numpy)
Mostly equivalent to numpy.linalg.svd, except that
* The order of output arguments here is `s`, `u`, `v` when `compute_uv` is
`True`, as opposed to `u`, `s`, `v` for numpy.linalg.svd.
* full_matrices is `False` by default as opposed to `True` for
numpy.linalg.svd.
* tf.linalg.svd uses the standard definition of the SVD
\\(A = U \Sigma V^H\\), such that the left singular vectors of `a` are
the columns of `u`, while the right singular vectors of `a` are the
columns of `v`. On the other hand, numpy.linalg.svd returns the adjoint
\\(V^H\\) as the third output argument.
```python
import tensorflow as tf
import numpy as np
s, u, v = tf.linalg.svd(a)
  tf_a_approx = tf.matmul(u, tf.matmul(tf.linalg.diag(s), v, adjoint_b=True))
u, s, v_adj = np.linalg.svd(a, full_matrices=False)
np_a_approx = np.dot(u, np.dot(np.diag(s), v_adj))
# tf_a_approx and np_a_approx should be numerically close.
  ```
@end_compatibility
"""
# pylint: disable=protected-access
s, u, v = gen_linalg_ops._svd(
tensor, compute_uv=compute_uv, full_matrices=full_matrices, name=name)
# pylint: enable=protected-access
if compute_uv:
return math_ops.real(s), u, v
else:
return math_ops.real(s)
# pylint: disable=redefined-builtin
@tf_export('norm', 'linalg.norm')
@deprecation.deprecated_args(
None, 'keep_dims is deprecated, use keepdims instead', 'keep_dims')
def norm(tensor,
ord='euclidean',
axis=None,
keepdims=None,
name=None,
keep_dims=None):
r"""Computes the norm of vectors, matrices, and tensors.
This function can compute several different vector norms (the 1-norm, the
Euclidean or 2-norm, the inf-norm, and in general the p-norm for p > 0) and
matrix norms (Frobenius, 1-norm, and inf-norm).
Args:
tensor: `Tensor` of types `float32`, `float64`, `complex64`, `complex128`
ord: Order of the norm. Supported values are 'fro', 'euclidean',
`1`, `2`, `np.inf` and any positive real number yielding the corresponding
p-norm. Default is 'euclidean' which is equivalent to Frobenius norm if
`tensor` is a matrix and equivalent to 2-norm for vectors.
Some restrictions apply:
a) The Frobenius norm `fro` is not defined for vectors,
b) If axis is a 2-tuple (matrix norm), only 'euclidean', 'fro', `1`,
`np.inf` are supported.
See the description of `axis` on how to compute norms for a batch of
vectors or matrices stored in a tensor.
axis: If `axis` is `None` (the default), the input is considered a vector
and a single vector norm is computed over the entire set of values in the
tensor, i.e. `norm(tensor, ord=ord)` is equivalent to
`norm(reshape(tensor, [-1]), ord=ord)`.
If `axis` is a Python integer, the input is considered a batch of vectors,
and `axis` determines the axis in `tensor` over which to compute vector
norms.
If `axis` is a 2-tuple of Python integers it is considered a batch of
matrices and `axis` determines the axes in `tensor` over which to compute
a matrix norm.
Negative indices are supported. Example: If you are passing a tensor that
can be either a matrix or a batch of matrices at runtime, pass
`axis=[-2,-1]` instead of `axis=None` to make sure that matrix norms are
computed.
    keepdims: If True, the axes indicated in `axis` are kept with size 1.
Otherwise, the dimensions in `axis` are removed from the output shape.
name: The name of the op.
keep_dims: Deprecated alias for `keepdims`.
Returns:
output: A `Tensor` of the same type as tensor, containing the vector or
matrix norms. If `keepdims` is True then the rank of output is equal to
the rank of `tensor`. Otherwise, if `axis` is none the output is a scalar,
if `axis` is an integer, the rank of `output` is one less than the rank
of `tensor`, if `axis` is a 2-tuple the rank of `output` is two less
than the rank of `tensor`.
Raises:
ValueError: If `ord` or `axis` is invalid.
@compatibility(numpy)
Mostly equivalent to numpy.linalg.norm.
Not supported: ord <= 0, 2-norm for matrices, nuclear norm.
Other differences:
a) If axis is `None`, treats the flattened `tensor` as a vector
regardless of rank.
b) Explicitly supports 'euclidean' norm as the default, including for
higher order tensors.
@end_compatibility
"""
keepdims = deprecation.deprecated_argument_lookup('keepdims', keepdims,
'keep_dims', keep_dims)
if keepdims is None:
keepdims = False
is_matrix_norm = ((isinstance(axis, tuple) or isinstance(axis, list)) and
len(axis) == 2)
if is_matrix_norm:
axis = tuple(axis)
if (not isinstance(axis[0], int) or not isinstance(axis[1], int) or
axis[0] == axis[1]):
raise ValueError(
"'axis' must be None, an integer, or a tuple of 2 unique integers")
# TODO(rmlarsen): Implement matrix 2-norm using tf.svd().
supported_matrix_norms = ['euclidean', 'fro', 1, np.inf]
if ord not in supported_matrix_norms:
raise ValueError("'ord' must be a supported matrix norm in %s, got %s" %
(supported_matrix_norms, ord))
else:
if not (isinstance(axis, int) or axis is None):
raise ValueError(
"'axis' must be None, an integer, or a tuple of 2 unique integers")
supported_vector_norms = ['euclidean', 1, 2, np.inf]
if (not np.isreal(ord) or ord <= 0) and ord not in supported_vector_norms:
raise ValueError("'ord' must be a supported vector norm, got %s" % ord)
if axis is not None:
axis = (axis,)
with ops.name_scope(name, 'norm', [tensor]):
tensor = ops.convert_to_tensor(tensor)
if ord in ['fro', 'euclidean', 2, 2.0]:
# TODO(rmlarsen): Move 2-norm to a separate clause once we support it for
# matrices.
result = math_ops.sqrt(
math_ops.reduce_sum(
tensor * math_ops.conj(tensor), axis, keepdims=True))
else:
result = math_ops.abs(tensor)
if ord == 1:
sum_axis = None if axis is None else axis[0]
result = math_ops.reduce_sum(result, sum_axis, keepdims=True)
if is_matrix_norm:
result = math_ops.reduce_max(result, axis[-1], keepdims=True)
elif ord == np.inf:
if is_matrix_norm:
result = math_ops.reduce_sum(result, axis[1], keepdims=True)
max_axis = None if axis is None else axis[0]
result = math_ops.reduce_max(result, max_axis, keepdims=True)
else:
# General p-norms (positive p only)
result = math_ops.pow(
math_ops.reduce_sum(math_ops.pow(result, ord), axis, keepdims=True),
1.0 / ord)
if not keepdims:
result = array_ops.squeeze(result, axis)
return result
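# Hedged usage sketch, not part of the TensorFlow source: vector and matrix
# norms computed with the function above. Tensor values and the helper name
# are illustrative only.
def _norm_example():
  t = ops.convert_to_tensor([[3., 4.], [0., 0.]])
  flattened = norm(t)                    # Frobenius over all entries -> 5.0
  row_norms = norm(t, ord=2, axis=-1)    # per-row Euclidean norms -> [5., 0.]
  inf_norm = norm(t, ord=np.inf, axis=[-2, -1])  # max absolute row sum -> 7.0
  return flattened, row_norms, inf_norm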
# pylint: enable=invalid-name,redefined-builtin
| {
"content_hash": "f163b617545ef88a9916ae38fa2a617a",
"timestamp": "",
"source": "github",
"line_count": 561,
"max_line_length": 80,
"avg_line_length": 41.666666666666664,
"alnum_prop": 0.6479144385026738,
"repo_name": "rabipanda/tensorflow",
"id": "9803eed6aefe072cbe0841dff2de3f640a440dd5",
"size": "24064",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "tensorflow/python/ops/linalg_ops.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "9008"
},
{
"name": "C",
"bytes": "332772"
},
{
"name": "C++",
"bytes": "36554246"
},
{
"name": "CMake",
"bytes": "190994"
},
{
"name": "Go",
"bytes": "1058787"
},
{
"name": "HTML",
"bytes": "4680032"
},
{
"name": "Java",
"bytes": "543556"
},
{
"name": "Jupyter Notebook",
"bytes": "1940884"
},
{
"name": "LLVM",
"bytes": "6536"
},
{
"name": "Makefile",
"bytes": "49545"
},
{
"name": "Objective-C",
"bytes": "12456"
},
{
"name": "Objective-C++",
"bytes": "94526"
},
{
"name": "PHP",
"bytes": "1487"
},
{
"name": "Perl",
"bytes": "6179"
},
{
"name": "Perl 6",
"bytes": "1357"
},
{
"name": "PureBasic",
"bytes": "24932"
},
{
"name": "Python",
"bytes": "32129142"
},
{
"name": "Ruby",
"bytes": "547"
},
{
"name": "Shell",
"bytes": "412473"
}
],
"symlink_target": ""
} |
import datetime
import requests
from math import log10
"""
To access the data, corresponding numbers:
des = 0
orbit_id = 1
jd = 2
cd = 3
dist = 4
dist_min = 5
dist_max = 6
v_rel = 7
v_inf = 8
t_sigma_f = 9
h = 10
"""
def diameter_calc(log_value, mag):
    # Formula for estimating diameter from albedo (log_value) and absolute magnitude (mag)
return 10**(0.5*(6.259 - log10(log_value) - 0.4*mag))
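# Worked example (assumed values, not from the original script): for an
# absolute magnitude of 22, diameter_calc(0.25, 22) is roughly 0.107 and
# diameter_calc(0.05, 22) roughly 0.240, so the two albedo bounds used below
# bracket the estimated diameter (in kilometres under the usual convention).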
def flyby_data():
json_data = requests.get("https://ssd-api.jpl.nasa.gov/cad.api?body=Earth&dist-max=20LD").json()
neo_data = []
# unix = time.time()
# datestamp = datetime.datetime.fromtimestamp(unix).strftime("%Y-%b-%d")
    # A cleaner way to build the date stamp:
now = datetime.datetime.now()
datestamp = "{:%Y-%b-%d}".format(now)
for data in json_data["data"]:
neo_date = data[3][:11]
neo_time = data[3][12:]
neo_des = data[0]
magnitude = float(data[10])
diameter_min = diameter_calc(0.25, magnitude)
diameter_max = diameter_calc(0.05, magnitude)
if neo_date == datestamp:
neo_data.append((neo_des, neo_time, round(diameter_min, 4), round(diameter_max, 4)))
return neo_data
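# Hedged usage sketch (not part of the original script): print today's close
# approaches together with their estimated diameter range.
if __name__ == "__main__":
    for designation, approach_time, d_min, d_max in flyby_data():
        print("{} at {}, est. diameter {}-{} km".format(
            designation, approach_time, d_min, d_max))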
| {
"content_hash": "250a4a86a9e1b7a1a79c43a1d44dba71",
"timestamp": "",
"source": "github",
"line_count": 53,
"max_line_length": 100,
"avg_line_length": 21.22641509433962,
"alnum_prop": 0.6062222222222222,
"repo_name": "FXelix/space_facts_bot",
"id": "d08e64b3c05fd74d2e179db9fb9eea15a69bbc62",
"size": "1126",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "NEO_flyby.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "4408"
}
],
"symlink_target": ""
} |
"""Tests for initializers in init_ops_v2."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import init_ops_v2
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
class InitializersTest(test.TestCase):
def _identical_test(self,
init1,
init2,
assertion,
shape=None,
dtype=dtypes.float32):
if shape is None:
shape = [100]
t1 = self.evaluate(init1(shape, dtype))
t2 = self.evaluate(init2(shape, dtype))
self.assertEqual(tensor_shape.as_shape(shape), t1.shape)
self.assertEqual(tensor_shape.as_shape(shape), t2.shape)
self.assertEqual(assertion, np.allclose(t1, t2, rtol=1e-15, atol=1e-15))
def _duplicated_test(self, init, shape=None, dtype=dtypes.float32):
if shape is None:
shape = [100]
t1 = self.evaluate(init(shape, dtype))
t2 = self.evaluate(init(shape, dtype))
self.assertEqual(tensor_shape.as_shape(shape), t1.shape)
self.assertEqual(tensor_shape.as_shape(shape), t2.shape)
self.assertFalse(np.allclose(t1, t2, rtol=1e-15, atol=1e-15))
def _range_test(self,
init,
shape,
target_mean=None,
target_std=None,
target_max=None,
target_min=None):
output = self.evaluate(init(shape))
self.assertEqual(output.shape, shape)
lim = 3e-2
if target_std is not None:
self.assertGreater(lim, abs(output.std() - target_std))
if target_mean is not None:
self.assertGreater(lim, abs(output.mean() - target_mean))
if target_max is not None:
self.assertGreater(lim, abs(output.max() - target_max))
if target_min is not None:
self.assertGreater(lim, abs(output.min() - target_min))
def _partition_test(self, init):
full_shape = (4, 2)
partition_shape = (2, 2)
partition_offset = (0, 0)
full_value = self.evaluate(init(full_shape, dtype=dtypes.float32))
got = self.evaluate(
init(
full_shape,
dtype=dtypes.float32,
partition_shape=partition_shape,
partition_offset=partition_offset))
self.assertEqual(got.shape, partition_shape)
self.assertAllClose(
got, array_ops.slice(full_value, partition_offset, partition_shape))
class ConstantInitializersTest(InitializersTest):
@test_util.run_in_graph_and_eager_modes
def testZeros(self):
self._range_test(
init_ops_v2.Zeros(), shape=(4, 5), target_mean=0., target_max=0.)
@test_util.run_in_graph_and_eager_modes
def testZerosPartition(self):
init = init_ops_v2.Zeros()
self._partition_test(init)
@test_util.run_in_graph_and_eager_modes
def testZerosInvalidKwargs(self):
init = init_ops_v2.Zeros()
with self.assertRaisesWithLiteralMatch(TypeError,
r"Unknown keyword arguments: dtpye"):
init((2, 2), dtpye=dtypes.float32)
@test_util.run_in_graph_and_eager_modes
def testOnes(self):
self._range_test(
init_ops_v2.Ones(), shape=(4, 5), target_mean=1., target_max=1.)
@test_util.run_in_graph_and_eager_modes
def testOnesPartition(self):
init = init_ops_v2.Ones()
self._partition_test(init)
@test_util.run_in_graph_and_eager_modes
def testConstantInt(self):
self._range_test(
init_ops_v2.Constant(2),
shape=(5, 6, 4),
target_mean=2,
target_max=2,
target_min=2)
@test_util.run_in_graph_and_eager_modes
def testConstantPartition(self):
init = init_ops_v2.Constant([1, 2, 3, 4])
with self.assertRaisesWithLiteralMatch(
ValueError,
r"Constant initializer doesn't support partition-related arguments"):
init((4, 2), dtype=dtypes.float32, partition_shape=(2, 2))
@test_util.run_in_graph_and_eager_modes
def testConstantTuple(self):
init = init_ops_v2.constant_initializer((10, 20, 30))
tensor = init(shape=[3])
self.assertAllEqual(self.evaluate(tensor), [10, 20, 30])
self.assertEqual(tensor.shape, [3])
@test_util.run_in_graph_and_eager_modes
def testConstantInvalidValue(self):
c = constant_op.constant([1.0, 2.0, 3.0])
with self.assertRaisesRegex(TypeError,
r"Invalid type for initial value: .*Tensor.*"):
init_ops_v2.constant_initializer(c)
v = variables.Variable([3.0, 2.0, 1.0])
with self.assertRaisesRegex(
TypeError, r"Invalid type for initial value: .*Variable.*"):
init_ops_v2.constant_initializer(v)
def _testNDimConstantInitializer(self, value, shape, expected):
with test_util.use_gpu():
init = init_ops_v2.constant_initializer(value)
x = init(shape)
actual = self.evaluate(array_ops.reshape(x, [-1]))
self.assertEqual(len(actual), len(expected))
for a, e in zip(actual, expected):
self.assertEqual(a, e)
@test_util.run_in_graph_and_eager_modes
def testNDimConstantInitializer(self):
value = [0, 1, 2, 3, 4, 5]
shape = [2, 3]
expected = list(value)
self._testNDimConstantInitializer(value, shape, expected)
self._testNDimConstantInitializer(np.asarray(value), shape, expected)
self._testNDimConstantInitializer(
np.asarray(value).reshape(tuple(shape)), shape, expected)
def _testNDimConstantInitializerIncorrectNumberValues(self, value, shape):
with test_util.use_gpu():
init = init_ops_v2.constant_initializer(value)
self.assertRaises(TypeError, init, shape=shape)
@test_util.run_in_graph_and_eager_modes
def testNDimConstantInitializerIncorrectNumberValues(self):
value = [0, 1, 2, 3, 4, 5]
for shape in [[2, 4], [2, 2]]:
self._testNDimConstantInitializerIncorrectNumberValues(value, shape)
self._testNDimConstantInitializerIncorrectNumberValues(
np.asarray(value), shape)
self._testNDimConstantInitializerIncorrectNumberValues(
np.asarray(value).reshape(tuple([2, 3])), shape)
class RandomUniformInitializerTest(InitializersTest):
@test_util.run_in_graph_and_eager_modes
def testRangeInitializer(self):
shape = (20, 6, 7)
self._range_test(
init_ops_v2.RandomUniform(minval=-1, maxval=1, seed=124),
shape,
target_mean=0.,
target_max=1,
target_min=-1)
@test_util.run_in_graph_and_eager_modes
def testInitializerIdentical(self):
self.skipTest("Doesn't work without the graphs")
init1 = init_ops_v2.RandomUniform(0, 7, seed=1)
init2 = init_ops_v2.RandomUniform(0, 7, seed=1)
self._identical_test(init1, init2, True)
@test_util.run_in_graph_and_eager_modes
def testInitializerDifferent(self):
init1 = init_ops_v2.RandomUniform(0, 7, seed=1)
init2 = init_ops_v2.RandomUniform(0, 7, seed=2)
self._identical_test(init1, init2, False)
@test_util.run_in_graph_and_eager_modes
def testDuplicatedInitializer(self):
init = init_ops_v2.RandomUniform(0.0, 1.0)
self._duplicated_test(init)
@test_util.run_in_graph_and_eager_modes
def testInitializePartition(self):
init = init_ops_v2.RandomUniform(0, 7, seed=1)
self._partition_test(init)
class RandomNormalInitializerTest(InitializersTest):
@test_util.run_in_graph_and_eager_modes
def testRangeInitializer(self):
self._range_test(
init_ops_v2.RandomNormal(mean=0, stddev=1, seed=153),
shape=(8, 12, 99),
target_mean=0.,
target_std=1)
@test_util.run_in_graph_and_eager_modes
def testInitializerIdentical(self):
self.skipTest("Doesn't work without the graphs")
init1 = init_ops_v2.RandomNormal(0, 7, seed=1)
init2 = init_ops_v2.RandomNormal(0, 7, seed=1)
self._identical_test(init1, init2, True)
@test_util.run_in_graph_and_eager_modes
def testInitializerDifferent(self):
init1 = init_ops_v2.RandomNormal(0, 7, seed=1)
init2 = init_ops_v2.RandomNormal(0, 7, seed=2)
self._identical_test(init1, init2, False)
@test_util.run_in_graph_and_eager_modes
def testDuplicatedInitializer(self):
init = init_ops_v2.RandomNormal(0.0, 1.0)
self._duplicated_test(init)
@test_util.run_in_graph_and_eager_modes
def testInitializePartition(self):
if test_util.is_xla_enabled():
self.skipTest(
"XLA ignores seeds for RandomNormal, skip xla-enabled test.")
init = init_ops_v2.RandomNormal(0, 7, seed=1)
self._partition_test(init)
class TruncatedNormalInitializerTest(InitializersTest):
@test_util.run_in_graph_and_eager_modes
def testRangeInitializer(self):
self._range_test(
init_ops_v2.TruncatedNormal(mean=0, stddev=1, seed=126),
shape=(12, 99, 7),
target_mean=0.,
target_max=2,
target_min=-2)
@test_util.run_in_graph_and_eager_modes
def testInitializerIdentical(self):
self.skipTest("Not seeming to work in Eager mode")
init1 = init_ops_v2.TruncatedNormal(0.0, 1.0, seed=1)
init2 = init_ops_v2.TruncatedNormal(0.0, 1.0, seed=1)
self._identical_test(init1, init2, True)
@test_util.run_in_graph_and_eager_modes
def testInitializerDifferent(self):
init1 = init_ops_v2.TruncatedNormal(0.0, 1.0, seed=1)
init2 = init_ops_v2.TruncatedNormal(0.0, 1.0, seed=2)
self._identical_test(init1, init2, False)
@test_util.run_in_graph_and_eager_modes
def testDuplicatedInitializer(self):
init = init_ops_v2.TruncatedNormal(0.0, 1.0)
self._duplicated_test(init)
@test_util.run_in_graph_and_eager_modes
def testInitializePartition(self):
init = init_ops_v2.TruncatedNormal(0.0, 1.0, seed=1)
self._partition_test(init)
@test_util.run_in_graph_and_eager_modes
def testInvalidDataType(self):
init = init_ops_v2.TruncatedNormal(0.0, 1.0)
with self.assertRaises(ValueError):
init([1], dtype=dtypes.int32)
class VarianceScalingInitializerTest(InitializersTest):
@test_util.run_in_graph_and_eager_modes
def testTruncatedNormalDistribution(self):
shape = [100, 100]
expect_mean = 0.
expect_var = 1. / shape[0]
init = init_ops_v2.VarianceScaling(distribution="truncated_normal")
with test_util.use_gpu(), test.mock.patch.object(
random_ops, "truncated_normal",
wraps=random_ops.truncated_normal) as mock_truncated_normal:
x = self.evaluate(init(shape))
self.assertTrue(mock_truncated_normal.called)
self.assertNear(np.mean(x), expect_mean, err=1e-2)
self.assertNear(np.var(x), expect_var, err=1e-2)
@test_util.run_in_graph_and_eager_modes
def testNormalDistribution(self):
shape = [100, 100]
expect_mean = 0.
expect_var = 1. / shape[0]
init = init_ops_v2.VarianceScaling(distribution="truncated_normal")
with test_util.use_gpu(), test.mock.patch.object(
random_ops, "truncated_normal",
wraps=random_ops.truncated_normal) as mock_truncated_normal:
x = self.evaluate(init(shape))
self.assertTrue(mock_truncated_normal.called)
self.assertNear(np.mean(x), expect_mean, err=1e-2)
self.assertNear(np.var(x), expect_var, err=1e-2)
@test_util.run_in_graph_and_eager_modes
def testUntruncatedNormalDistribution(self):
shape = [100, 100]
expect_mean = 0.
expect_var = 1. / shape[0]
init = init_ops_v2.VarianceScaling(distribution="untruncated_normal")
with test_util.use_gpu(), test.mock.patch.object(
random_ops, "random_normal",
wraps=random_ops.random_normal) as mock_random_normal:
x = self.evaluate(init(shape))
self.assertTrue(mock_random_normal.called)
self.assertNear(np.mean(x), expect_mean, err=1e-2)
self.assertNear(np.var(x), expect_var, err=1e-2)
@test_util.run_in_graph_and_eager_modes
def testUniformDistribution(self):
shape = [100, 100]
expect_mean = 0.
expect_var = 1. / shape[0]
init = init_ops_v2.VarianceScaling(distribution="uniform")
with test_util.use_gpu():
x = self.evaluate(init(shape))
self.assertNear(np.mean(x), expect_mean, err=1e-2)
self.assertNear(np.var(x), expect_var, err=1e-2)
@test_util.run_in_graph_and_eager_modes
def testInitializePartition(self):
partition_shape = (100, 100)
shape = [1000, 100]
expect_mean = 0.
expect_var = 1. / shape[0]
init = init_ops_v2.VarianceScaling(distribution="untruncated_normal")
with test_util.use_gpu(), test.mock.patch.object(
random_ops, "random_normal",
wraps=random_ops.random_normal) as mock_random_normal:
x = self.evaluate(init(shape, partition_shape=partition_shape))
self.assertTrue(mock_random_normal.called)
self.assertEqual(x.shape, partition_shape)
self.assertNear(np.mean(x), expect_mean, err=1e-3)
self.assertNear(np.var(x), expect_var, err=1e-3)
class OrthogonalInitializerTest(InitializersTest):
@test_util.run_in_graph_and_eager_modes
def testRangeInitializer(self):
self._range_test(
init_ops_v2.Orthogonal(seed=123), shape=(20, 20), target_mean=0.)
@test_util.run_in_graph_and_eager_modes
def testInitializerIdentical(self):
self.skipTest("Doesn't work without the graphs")
init1 = init_ops_v2.Orthogonal(seed=1)
init2 = init_ops_v2.Orthogonal(seed=1)
self._identical_test(init1, init2, True, (10, 10))
@test_util.run_in_graph_and_eager_modes
def testInitializerDifferent(self):
init1 = init_ops_v2.Orthogonal(seed=1)
init2 = init_ops_v2.Orthogonal(seed=2)
self._identical_test(init1, init2, False, (10, 10))
@test_util.run_in_graph_and_eager_modes
def testDuplicatedInitializer(self):
init = init_ops_v2.Orthogonal()
self._duplicated_test(init, (10, 10))
@test_util.run_in_graph_and_eager_modes
def testInvalidDataType(self):
init = init_ops_v2.Orthogonal()
self.assertRaises(ValueError, init, shape=(10, 10), dtype=dtypes.string)
@test_util.run_in_graph_and_eager_modes
def testInvalidShape(self):
init = init_ops_v2.Orthogonal()
with test_util.use_gpu():
self.assertRaises(ValueError, init, shape=[5])
@test_util.run_in_graph_and_eager_modes
def testGain(self):
self.skipTest("Doesn't work without the graphs")
init1 = init_ops_v2.Orthogonal(seed=1)
init2 = init_ops_v2.Orthogonal(gain=3.14, seed=1)
with test_util.use_gpu():
t1 = self.evaluate(init1(shape=(10, 10)))
t2 = self.evaluate(init2(shape=(10, 10)))
self.assertAllClose(t1, t2 / 3.14)
@test_util.run_in_graph_and_eager_modes
def testShapesValues(self):
for shape in [(10, 10), (10, 9, 8), (100, 5, 5), (50, 40), (40, 50)]:
init = init_ops_v2.Orthogonal()
tol = 1e-5
with test_util.use_gpu():
# Check the shape
t = self.evaluate(init(shape))
self.assertAllEqual(shape, t.shape)
# Check orthogonality by computing the inner product
t = t.reshape((np.prod(t.shape[:-1]), t.shape[-1]))
if t.shape[0] > t.shape[1]:
self.assertAllClose(
np.dot(t.T, t), np.eye(t.shape[1]), rtol=tol, atol=tol)
else:
self.assertAllClose(
np.dot(t, t.T), np.eye(t.shape[0]), rtol=tol, atol=tol)
@test_util.run_in_graph_and_eager_modes
def testPartition(self):
init = init_ops_v2.Orthogonal(seed=1)
with self.assertRaisesWithLiteralMatch(
ValueError,
r"Orthogonal initializer doesn't support partition-related arguments"):
init((4, 2), dtype=dtypes.float32, partition_shape=(2, 2))
class IdentityInitializerTest(InitializersTest):
@test_util.run_in_graph_and_eager_modes
def testRange(self):
with self.assertRaises(ValueError):
shape = (3, 4, 5)
self._range_test(
init_ops_v2.Identity(),
shape=shape,
target_mean=1. / shape[0],
target_max=1.)
shape = (3, 3)
self._range_test(
init_ops_v2.Identity(),
shape=shape,
target_mean=1. / shape[0],
target_max=1.)
@test_util.run_in_graph_and_eager_modes
def testInvalidDataType(self):
init = init_ops_v2.Identity()
self.assertRaises(ValueError, init, shape=[10, 5], dtype=dtypes.int32)
@test_util.run_in_graph_and_eager_modes
def testInvalidShape(self):
init = init_ops_v2.Identity()
with test_util.use_gpu():
self.assertRaises(ValueError, init, shape=[5, 7, 7])
self.assertRaises(ValueError, init, shape=[5])
self.assertRaises(ValueError, init, shape=[])
@test_util.run_in_graph_and_eager_modes
def testNonSquare(self):
init = init_ops_v2.Identity()
shape = (10, 5)
with test_util.use_gpu():
self.assertAllClose(self.evaluate(init(shape)), np.eye(*shape))
@test_util.run_in_graph_and_eager_modes
def testGain(self):
shape = (10, 10)
for dtype in [dtypes.float32, dtypes.float64]:
init_default = init_ops_v2.Identity()
init_custom = init_ops_v2.Identity(gain=0.9)
with test_util.use_gpu():
self.assertAllClose(
self.evaluate(init_default(shape, dtype=dtype)), np.eye(*shape))
with test_util.use_gpu():
self.assertAllClose(
self.evaluate(init_custom(shape, dtype=dtype)),
np.eye(*shape) * 0.9)
@test_util.run_in_graph_and_eager_modes
def testPartition(self):
init = init_ops_v2.Identity()
with self.assertRaisesWithLiteralMatch(
ValueError,
r"Identity initializer doesn't support partition-related arguments"):
init((4, 2), dtype=dtypes.float32, partition_shape=(2, 2))
class GlorotInitializersTest(InitializersTest):
@test_util.run_in_graph_and_eager_modes
def testGlorotUniform(self):
shape = (5, 6, 4, 2)
fan_in, fan_out = init_ops_v2._compute_fans(shape)
std = np.sqrt(2. / (fan_in + fan_out))
self._range_test(
init_ops_v2.GlorotUniform(seed=123),
shape,
target_mean=0.,
target_std=std)
@test_util.run_in_graph_and_eager_modes
def test_GlorotNormal(self):
shape = (5, 6, 4, 2)
fan_in, fan_out = init_ops_v2._compute_fans(shape)
std = np.sqrt(2. / (fan_in + fan_out))
self._range_test(
init_ops_v2.GlorotNormal(seed=123),
shape,
target_mean=0.,
target_std=std)
class MethodInitializers(InitializersTest):
@test_util.run_in_graph_and_eager_modes
def testLecunUniform(self):
shape = (5, 6, 4, 2)
fan_in, _ = init_ops_v2._compute_fans(shape)
std = np.sqrt(1. / fan_in)
self._range_test(
init_ops_v2.lecun_uniform(seed=123),
shape,
target_mean=0.,
target_std=std)
@test_util.run_in_graph_and_eager_modes
def testHeUniform(self):
shape = (5, 6, 4, 2)
fan_in, _ = init_ops_v2._compute_fans(shape)
std = np.sqrt(2. / fan_in)
self._range_test(
init_ops_v2.he_uniform(seed=123), shape, target_mean=0., target_std=std)
@test_util.run_in_graph_and_eager_modes
def testLecunNormal(self):
shape = (5, 6, 4, 2)
fan_in, _ = init_ops_v2._compute_fans(shape)
std = np.sqrt(1. / fan_in)
self._range_test(
init_ops_v2.lecun_normal(seed=123),
shape,
target_mean=0.,
target_std=std)
@test_util.run_in_graph_and_eager_modes
def testHeNormal(self):
shape = (5, 6, 4, 2)
fan_in, _ = init_ops_v2._compute_fans(shape)
std = np.sqrt(2. / fan_in)
self._range_test(
init_ops_v2.he_normal(seed=123), shape, target_mean=0., target_std=std)
if __name__ == "__main__":
test.main()
| {
"content_hash": "7584794744e2b1de83dab3184e2d2d96",
"timestamp": "",
"source": "github",
"line_count": 580,
"max_line_length": 80,
"avg_line_length": 34.18103448275862,
"alnum_prop": 0.6587137452711224,
"repo_name": "cxxgtxy/tensorflow",
"id": "2de636c2f8df6ab5c023d03ac9b76f5c2c51cd5e",
"size": "20514",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "tensorflow/python/ops/init_ops_v2_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "7481"
},
{
"name": "C",
"bytes": "186817"
},
{
"name": "C++",
"bytes": "24882047"
},
{
"name": "CMake",
"bytes": "164374"
},
{
"name": "Go",
"bytes": "854846"
},
{
"name": "HTML",
"bytes": "564161"
},
{
"name": "Java",
"bytes": "307246"
},
{
"name": "Jupyter Notebook",
"bytes": "1833659"
},
{
"name": "LLVM",
"bytes": "6536"
},
{
"name": "Makefile",
"bytes": "37393"
},
{
"name": "Objective-C",
"bytes": "7037"
},
{
"name": "Objective-C++",
"bytes": "64142"
},
{
"name": "Protocol Buffer",
"bytes": "225621"
},
{
"name": "Python",
"bytes": "22009999"
},
{
"name": "Shell",
"bytes": "341543"
},
{
"name": "TypeScript",
"bytes": "797437"
}
],
"symlink_target": ""
} |
"""
Unit tests for standard_names package.
"""
| {
"content_hash": "21ddecbef07af51d651a634383641037",
"timestamp": "",
"source": "github",
"line_count": 3,
"max_line_length": 38,
"avg_line_length": 15.666666666666666,
"alnum_prop": 0.6808510638297872,
"repo_name": "csdms/standard_names",
"id": "15615c3c4c417ff3148c3ee84f927a8f8c7c7e16",
"size": "47",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "standard_names/tests/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "1359"
},
{
"name": "Makefile",
"bytes": "2470"
},
{
"name": "PowerShell",
"bytes": "3442"
},
{
"name": "Python",
"bytes": "145367"
},
{
"name": "Shell",
"bytes": "1728"
}
],
"symlink_target": ""
} |
"""Deployment script for CWV in GA4 solution.
Deploys the SQL tables and scripts needed to collect CWVs according to the
standard set in https://web.dev/vitals-ga4/ as well as a Cloud Run function
for alerting.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
https://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import argparse
import os
import subprocess
import sys
import time
from typing import List
import google.api_core.exceptions
import google.auth
from google.auth.credentials import Credentials
from google.cloud import bigquery
from google.cloud import bigquery_datatransfer
from google.cloud import eventarc
from google.cloud import service_usage_v1
from google.cloud.eventarc_v1.types.trigger import CloudRun
from google.cloud.eventarc_v1.types.trigger import Destination
from google.cloud.eventarc_v1.types.trigger import EventFilter
from googleapiclient import discovery
def enable_services(credentials: Credentials, project_id: str):
"""Enables the services required to use the solution.
Args:
credentials: the Google credentials to use to authenticate.
project_id: the project the services will be enabled for.
"""
crm = discovery.build('cloudresourcemanager', 'v3')
project = crm.projects().get(name='projects/' + project_id).execute()
client = service_usage_v1.ServiceUsageClient(credentials=credentials)
request = service_usage_v1.BatchEnableServicesRequest()
request.parent = project['name']
request.service_ids = [
'compute.googleapis.com', 'bigquerydatatransfer.googleapis.com'
]
operation = client.batch_enable_services(request=request)
try:
operation.result()
except google.api_core.GoogleAPICallError as ex:
raise SystemExit('Unable to enable the required services. Please check the'
' logs and resolve the issues found there.') from ex
def get_gcp_regions(credentials: Credentials, project_id: str) -> List[str]:
"""Fetches the list of available GCP regions and returns a list of str.
Args:
credentials: the Google credentials to use to authenticate.
project_id: The project to use when making the query.
Returns:
A list of region names in str format.
"""
regions = []
service = discovery.build('compute', 'v1', credentials=credentials)
request = service.regions().list(project=project_id)
while request is not None:
response = request.execute()
for region in response['items']:
if 'name' in region and region['name']:
regions.append(region['name'])
if 'nextPageToken' in response:
request = service.regions().list(pageToken=response['nextPageToken'])
else:
request = None
return regions
def delete_scheduled_query(display_name: str, project_id: str, region: str):
"""Deletes the BigQuery scheduled queries with the given display name.
Please note that the display name of a BigQuery scheduled query is not
unique. This means that multiple queries can be deleted.
Args:
display_name: the name of the config to delete.
project_id: the project to delete the query from.
region: the region the query is stored in.
"""
transfer_client = bigquery_datatransfer.DataTransferServiceClient()
parent = transfer_client.common_location_path(project=project_id,
location=region)
transfer_config_req = bigquery_datatransfer.ListTransferConfigsRequest(
parent=parent, data_source_ids=['scheduled_query'])
configs = transfer_client.list_transfer_configs(request=transfer_config_req)
for config in configs:
if config.display_name == display_name:
transfer_client.delete_transfer_config(name=config.name)
def deploy_scheduled_materialize_query(project_id: str,
credentials: Credentials, region: str,
ga_property: str,
service_account: str) -> None:
"""Deploys the query to create the materialized CWV summary table.
The scheduled query is given the name "Update Web Vitals Summary" and any
other scheduled query with this name will be deleted before the new one is
deployed.
  Args:
    project_id: The project to deploy the query to.
    credentials: The credentials used to authenticate the BigQuery Data
      Transfer client.
    region: The region of the dataset used for the GA export.
    ga_property: The GA property used to collect the CWV data.
    service_account: The service account the scheduled query runs as.
  """
display_name = 'Update Web Vitals Summary'
materialize_query = f"""
-- Copyright 2021 Google LLC
-- Licensed under the Apache License, Version 2.0 (the "License");
-- you may not use this file except in compliance with the License.
-- You may obtain a copy of the License at
-- https://www.apache.org/licenses/LICENSE-2.0
-- Unless required by applicable law or agreed to in writing, software
-- distributed under the License is distributed on an "AS IS" BASIS,
-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-- See the License for the specific language governing permissions and
-- limitations under the License.
-- Materialize Web Vitals metrics from GA4 event export data
CREATE OR REPLACE TABLE `{project_id}.analytics_{ga_property}.web_vitals_summary`
PARTITION BY DATE(event_timestamp)
CLUSTER BY metric_name
AS
SELECT
ga_session_id,
IF(
EXISTS(
SELECT 1
FROM UNNEST(events) AS e
WHERE e.event_name = 'first_visit'
),
'New user',
'Returning user') AS user_type,
IF(
(SELECT MAX(session_engaged) FROM UNNEST(events)) > 0, 'Engaged', 'Not engaged')
AS session_engagement,
evt.* EXCEPT (session_engaged, event_name),
event_name AS metric_name,
FORMAT_TIMESTAMP('%Y%m%d', event_timestamp) AS event_date
FROM
(
SELECT
ga_session_id,
ARRAY_AGG(custom_event) AS events
FROM
(
SELECT
ga_session_id,
STRUCT(
country,
device_category,
device_os,
traffic_medium,
traffic_name,
traffic_source,
page_path,
debug_target,
event_timestamp,
event_name,
metric_id,
IF(event_name = 'LCP', metric_value / 1000, metric_value)
AS metric_value,
user_pseudo_id,
session_engaged,
session_revenue) AS custom_event
FROM
(
SELECT
(SELECT value.int_value FROM UNNEST(event_params) WHERE key = 'ga_session_id')
AS ga_session_id,
(SELECT value.string_value FROM UNNEST(event_params) WHERE key = 'metric_id')
AS metric_id,
ANY_VALUE(device.category) AS device_category,
ANY_VALUE(device.operating_system) AS device_os,
ANY_VALUE(traffic_source.medium) AS traffic_medium,
ANY_VALUE(traffic_source.name) AS traffic_name,
ANY_VALUE(traffic_source.source) AS traffic_source,
ANY_VALUE(
REGEXP_SUBSTR(
(SELECT value.string_value FROM UNNEST(event_params) WHERE key = 'page_location'),
r'^[^?]+')) AS page_path,
ANY_VALUE(
(SELECT value.string_value FROM UNNEST(event_params) WHERE key = 'debug_target'))
AS debug_target,
ANY_VALUE(user_pseudo_id) AS user_pseudo_id,
ANY_VALUE(geo.country) AS country,
ANY_VALUE(event_name) AS event_name,
SUM(ecommerce.purchase_revenue) AS session_revenue,
MAX(
(
SELECT
COALESCE(
value.double_value,
value.int_value,
CAST(value.string_value AS NUMERIC))
FROM UNNEST(event_params)
WHERE key = 'session_engaged'
)) AS session_engaged,
TIMESTAMP_MICROS(MAX(event_timestamp)) AS event_timestamp,
MAX(
(
SELECT COALESCE(value.double_value, value.int_value)
FROM UNNEST(event_params)
WHERE key = 'metric_value'
)) AS metric_value,
FROM
# Replace source table name
`{project_id}.analytics_{ga_property}.events_*`
WHERE
event_name IN ('LCP', 'FID', 'CLS', 'first_visit', 'purchase')
GROUP BY
1, 2
)
)
WHERE
ga_session_id IS NOT NULL
GROUP BY ga_session_id
)
CROSS JOIN UNNEST(events) AS evt
WHERE evt.event_name NOT IN ('first_visit', 'purchase');
"""
delete_scheduled_query(display_name=display_name,
project_id=project_id,
region=region)
transfer_client = bigquery_datatransfer.DataTransferServiceClient(
credentials=credentials)
parent = transfer_client.common_location_path(project=project_id,
location=region)
transfer_config = bigquery_datatransfer.TransferConfig(
display_name=display_name,
data_source_id='scheduled_query',
params={
'query': materialize_query,
},
schedule='every 24 hours',
)
transfer_config = transfer_client.create_transfer_config(
bigquery_datatransfer.CreateTransferConfigRequest(
parent=parent,
transfer_config=transfer_config,
service_account_name=service_account))
# wait 30 seconds for the query to complete. Otherwise anything depending on
# the table being created will fail.
time.sleep(30)
def deploy_p75_procedure(project_id: str, ga_property: str):
"""Deploys the p75 stored procedure to BigQuery.
The p75 procedure is used by the email alerting function to find if the CWV
values have crossed the threshold set by the user.
Args:
project_id: The GCP project ID the procedure is being deployed to.
ga_property: The GA property used to collect the CWV data.
"""
p75_procedure = f"""CREATE OR REPLACE
PROCEDURE analytics_{ga_property}.get_cwv_p75_for_date(start_date date, num_days INT64) BEGIN
SELECT
metric_name, APPROX_QUANTILES(metric_value, 100)[OFFSET(75)] AS p75, COUNT(1) AS count
FROM `{project_id}.analytics_{ga_property}.web_vitals_summary`
WHERE
PARSE_DATE('%Y%m%d', event_date)
BETWEEN DATE_SUB(start_date, INTERVAL num_days DAY)
AND DATE_SUB(start_date, INTERVAL 1 DAY)
GROUP BY 1;
END
"""
bq_client = bigquery.Client()
query_job = bq_client.query(p75_procedure)
def query_done_callback(job):
if job.error_result:
print('There was an error deploying the p75 procedure: ', file=sys.stderr)
for error_key in job.error_result.keys():
for error in job.error_result[error_key]:
print(error, file=sys.stderr)
raise SystemExit('Please check the GCP logs and try again.')
query_job.add_done_callback(query_done_callback)
query_job.result()
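# Illustrative only (not executed by this script): once deployed, the stored
# procedure can be called from BigQuery along these lines, where <GA_PROPERTY>
# stands in for your analytics property ID:
#
#   CALL `analytics_<GA_PROPERTY>.get_cwv_p75_for_date`(CURRENT_DATE(), 28);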
def deploy_cloudrun_alerter(ga_property: str, region: str, lcp_threshold: int,
cls_threshold: float, fid_threshold: int,
email_server: str, email_user: str,
email_password: str, email_from: str,
alert_recipients: str):
"""Deploys the Cloud Run function that sends the alert email.
The GCP API doesn't provide a function to deploy functions from source, so
we shell out to the gcloud command. That command sets the environment
variables for the function to use and deploys it from the source in the
notifications directory.
Note: this won't work if the script is moved, so we fail if the
notifications directory is not found.
Args:
ga_property: The GA property used to collect the CWV data.
region: The region to deploy the function to. This must be the same region
the CWV data is stored in in BQ.
lcp_threshold: The threshold LCP value to send the email for.
cls_threshold: The threshold CLS value to send the email for.
fid_threshold: The threshold FID value to send the email for.
email_server: The SMTP server to use to send the email.
email_user: The user name to use when authenticating with the server.
email_password: The password to use when authenticating with the server.
email_from: The email address to use in the alert's From field.
alert_recipients: A comma-separated list of emails to send the alert to.
Raises:
SystemExit: Raised when the deployment fails.
"""
# : used as a separator to allow a comma-separated list of alert recipients.
env_vars = (f'^:^ANALYTICS_ID={ga_property}:'
f'GOOD_LCP={lcp_threshold}:'
f'GOOD_CLS={cls_threshold}:'
f'GOOD_FID={fid_threshold}:'
f'EMAIL_SERVER={email_server}:'
f'EMAIL_USER={email_user}:'
f'EMAIL_PASS={email_password}:'
f'EMAIL_FROM={email_from}:'
f'ALERT_RECEIVERS={alert_recipients}')
source_dir = os.path.join(os.path.dirname(os.path.realpath(__file__)),
'notifications')
if not os.path.isdir(source_dir):
print('Source directory for the email notification function not found.',
file=sys.stderr)
raise SystemExit('Please ensure the deploy script is in the distribution '
'directory as delivered.')
try:
subprocess.run([
'gcloud', 'run', 'deploy', 'cwv-alerting-service', f'--region={region}',
f'--set-env-vars={env_vars}', '--source', source_dir
],
check=True)
except subprocess.CalledProcessError as cpe:
raise SystemExit('Deploying the email alerting function failed. Please '
'check the messages above and correct any issues before '
'trying again.') from cpe
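# For reference (values illustrative), the subprocess call above is roughly
# equivalent to running:
#
#   gcloud run deploy cwv-alerting-service --region=<REGION> \
#       --set-env-vars=<ENV_VARS> --source ./notifications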
def create_cloudrun_trigger(project_id: str, region: str, service_account: str):
"""Creates the trigger to check if an alert email should be sent.
This creates a trigger named cwv-alert-email-trigger that fires when a
BigQuery insert job completes. It deletes the trigger with the same name if
it exists first.
Args:
project_id: The GCP project ID the trigger is being deployed to.
region: The region to create the trigger in.
service_account: The service account to assign the trigger.
"""
trigger_name = (f'projects/{project_id}/locations/{region}/triggers/'
'cwv-alert-email-trigger')
eventarc_client = eventarc.EventarcClient()
eventarc_client.delete_trigger(name=trigger_name, allow_missing=True)
destination = Destination(
cloud_run=CloudRun(service='cwv-alerting-service', region=region))
event_filters = [
EventFilter(attribute='type', value='google.cloud.audit.log.v1.written'),
EventFilter(attribute='serviceName', value='bigquery.googleapis.com'),
EventFilter(attribute='methodName',
value='google.cloud.bigquery.v2.JobService.InsertJob')
]
new_trigger = eventarc.Trigger(name=trigger_name,
destination=destination,
service_account=service_account,
event_filters=event_filters)
parent = eventarc_client.common_location_path(project=project_id,
location=region)
try:
eventarc_client.create_trigger(parent=parent,
trigger=new_trigger,
trigger_id='cwv-alert-email-trigger')
except Exception as ex:
print(ex, file=sys.stderr)
raise SystemExit(
'The event trigger was not created. Please check the '
'errors above and ensure the service account you are using'
        ' has the correct roles (e.g. roles/eventarc.eventReceiver).') from ex
def get_default_service_account_email(project_id: str,
credentials: Credentials) -> str:
"""Gets the email address for the default iam service account.
Args:
project_id: The GCP project to get the default account for.
credentials: The credentials to use to authenticate.
Returns:
The email address of the default compute iam service account.
"""
service = discovery.build('iam', 'v1', credentials=credentials)
service_accounts = service.projects().serviceAccounts().list(
name=f'projects/{project_id}').execute()
for account in service_accounts['accounts']:
display_name = account['displayName'].lower()
if display_name.find('default') != -1:
return account['email']
return ''
def add_roles_to_service_account(service_account: str, project_id: str,
credentials: Credentials) -> None:
"""Creates a new role with the permissions required to deploy the solution
and it to the passed service account.
The service account needs to have the correct permissions, and this is the
most straightforward way of ensuring that. The permissions in the new role are
  - bigquery.tables.get
- bigquery.tables.getData
- bigquery.tables.list
- bigquery.tables.create
- bigquery.tables.update
- bigquery.tables.updateData
- bigquery.jobs.list
- bigquery.jobs.create
- bigquery.transfers.update
- eventarc.events.receiveAuditLogWritten
Args:
service_account: The service account to add the role to.
project_id: The project the new role will be created in.
credentials: The credentials to authenticate the new role request with.
"""
service = discovery.build('iam', 'v1', credentials=credentials)
role_resp = service.projects().roles().list(
parent=f'projects/{project_id}').execute()
current_roles = role_resp.get('roles', [])
role = None
# if the role already exists, it's an error to try and create it again
for r in current_roles:
if r['name'].endswith('cwv_in_ga4_deployer'):
role = r
break
if not role:
role = service.projects().roles().create(
parent=f'projects/{project_id}',
body={
'roleId': 'cwv_in_ga4_deployer',
'role': {
'title': 'CWV in GA4 Deployment role',
                'description': 'Used to deploy the CWV in GA4 solution.',
'includedPermissions': [
'bigquery.tables.get', 'bigquery.tables.getData',
'bigquery.tables.list', 'bigquery.tables.create',
'bigquery.tables.update', 'bigquery.tables.updateData',
'bigquery.jobs.list', 'bigquery.jobs.create',
'bigquery.transfers.update',
'eventarc.events.receiveAuditLogWritten'
],
'stage': 'GA'
}
}).execute()
if not role:
raise SystemExit('There was an issue trying to create the role required for'
' the BigQuery scheduled queries. Please check the cloud '
'logs, correct the issue, and try again.')
service = discovery.build('cloudresourcemanager',
'v1',
credentials=credentials)
policy = service.projects().getIamPolicy(resource=project_id,
body={
'options': {
'requestedPolicyVersion': 1
}
}).execute()
policy['bindings'].append({
'role': role['name'],
'members': [f'serviceAccount:{service_account}']
})
service.projects().setIamPolicy(resource=project_id, body={
"policy": policy
}).execute()
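# For reference, the binding appended to the project IAM policy above has
# roughly this shape (the project and service account values below are
# placeholders, not taken from a real deployment):
#
#   {'role': 'projects/my-project/roles/cwv_in_ga4_deployer',
#    'members': ['serviceAccount:deployer@my-project.iam.gserviceaccount.com']}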
def main():
"""The main entry point.
Command line arguments are parsed and any missing information is gathered
before running through the deployment steps.
Raises:
SystemExit: Raised when a non-GA4 property is entered.
"""
arg_parser = argparse.ArgumentParser(
description='Deploys the CWV in GA solution')
arg_parser.add_argument('-g',
'--ga-property',
type=int,
help=('The GA property ID to use when looking for '
'exports in big query.'))
arg_parser.add_argument('-r',
'--region',
help='The region GA data is being exported to.')
arg_parser.add_argument('-l',
'--lcp-threshold',
default=2500,
help=('The value to use as the threshold for a good '
'LCP score in ms (default %(default)d).'))
arg_parser.add_argument('-f',
'--fid-threshold',
default=100,
help=('The value to use as a threshold for a good FID'
' score in ms (default %(default)d)'))
arg_parser.add_argument('-c',
'--cls-threshold',
default=0.1,
help=('The value to use as a threshold for a good CLS'
                              ' score (unit-less) (default %(default)1.1f)'))
arg_parser.add_argument('-s',
'--email-server',
help=('The address of the email server to use to send'
' alerts.'))
arg_parser.add_argument('-u',
'--email-user',
help=('The username to use to authenticate with the '
'email server.'))
arg_parser.add_argument('-p',
'--email-password',
help=('The password to use to authenticate with the '
'email server'))
arg_parser.add_argument('-e',
'--email-from',
help=('The email address used in the from field of '
'the alert'))
arg_parser.add_argument('-a',
'--alert-recipients',
help=('A comma-separated list of email addresses to '
'send the alerts to.'))
arg_parser.add_argument('-i',
'--iam-service-account',
help=('The email of the IAM service account to use '
'when authenticating calls to the email '
'alerting function. Please note that this '
'account requires roles/eventarc.eventReceiver.'
' If not provided, the default compute service '
'account is used.'))
arg_parser.add_argument('--email-alert',
help='Flag for deploying the email alerting service',
action='store_true',
dest='email_alert')
arg_parser.add_argument('--no-email-alert',
help='Flag to not deploy the email alerting service',
action='store_false',
dest='email_alert')
arg_parser.set_defaults(email_alert=True)
args = arg_parser.parse_args()
credentials, project_id = google.auth.default()
if not project_id:
project_id = os.environ['GOOGLE_CLOUD_PROJECT']
enable_services(credentials, project_id)
if not args.region:
args.region = input(
'Which region should be deployed to (type list for a list)? ').strip()
while args.region == 'list':
region_list = get_gcp_regions(credentials, project_id)
print('\n'.join(region_list))
args.region = (input(
'Which region is the GA export in (list for a list of regions)? ').
strip())
if not args.ga_property:
args.ga_property = (input(
'Please enter the GA property ID you are collecting CWV data with: ').
strip())
if not args.ga_property.isdigit():
raise SystemExit('Only GA4 properties are supported at this time.')
  # There are three possibilities: the default credentials provide a service
  # account email, the word "default" is given in place of an email address, or
  # the credentials have no service_account_email field at all.
if not args.iam_service_account:
input_msg = 'Please enter the email of the service account to use: '
if hasattr(credentials, 'service_account_email'):
if credentials.service_account_email == 'default':
args.iam_service_account = get_default_service_account_email(
project_id, credentials)
else:
args.iam_service_account = credentials.service_account_email
input_msg = (
'Please note: using the default service account, '
f'{args.iam_service_account}, will result in a new role being '
'created to allow for the creation and execution of BigQuery '
'scheduled queries.\n' + input_msg)
else:
input_msg = ('Please note: your default credentials do not provide a '
'service account. You must provide one here.\n' + input_msg)
user_service_account = input(input_msg).strip()
if user_service_account:
args.iam_service_account = user_service_account
if args.iam_service_account:
add_roles_to_service_account(args.iam_service_account, project_id,
credentials)
else:
raise SystemExit(
'You must provide a service account for deploying and '
'running the solution to continue. Please create a service account '
'and try again.')
deploy_scheduled_materialize_query(project_id, credentials, args.region,
args.ga_property, args.iam_service_account)
if args.email_alert:
if not args.email_server:
args.email_server = input(
'Please enter the address of the email server to use to send alerts '
'(leave empty to not deploy the email alerting function): ').strip()
if args.email_server:
if not args.email_user:
args.email_user = input(
'Please enter the username for authenticating with the email '
'server: ').strip()
if not args.email_password:
args.email_password = input(
'Please enter the password for authenticating with the email '
'server: ').strip()
if not args.email_from:
args.email_from = input(
'Please enter the email address to use in the FROM field: ').strip(
)
if not args.alert_recipients:
args.alert_recipients = input(
'Please enter a comma-separated list of email addresses to send '
'the alerts to: ').strip()
      # Only ask for the CWV thresholds if no command-line args were used;
      # the defaults are used otherwise.
if len(sys.argv) == 1:
args.lcp_threshold = input(
'Please enter the alert threshold for LCP in ms (default 2500): '
).strip()
if not args.lcp_threshold:
        args.lcp_threshold = 2500
args.fid_threshold = input(
'Please enter the alert threshold for FID in ms (default 100): '
).strip()
if not args.fid_threshold:
args.fid_threshold = 100
args.cls_threshold = input(
'Please enter the threshold for CLS (default 0.1): ').strip()
if not args.cls_threshold:
args.cls_threshold = 0.1
deploy_p75_procedure(project_id, args.ga_property)
if args.email_server:
deploy_cloudrun_alerter(args.ga_property, args.region, args.lcp_threshold,
args.cls_threshold, args.fid_threshold,
args.email_server, args.email_user,
args.email_password, args.email_from,
args.alert_recipients)
create_cloudrun_trigger(project_id, args.region, args.iam_service_account)
if __name__ == '__main__':
main()
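# Example invocations (illustrative values, not taken from a real deployment):
#
#   python deploy.py --ga-property 123456789 --region us-central1
#   python deploy.py -g 123456789 -r us-central1 --no-email-alert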
| {
"content_hash": "2f455838b13cba407337675654217da7",
"timestamp": "",
"source": "github",
"line_count": 706,
"max_line_length": 100,
"avg_line_length": 40.42492917847026,
"alnum_prop": 0.6130343377715487,
"repo_name": "google/cwv_in_ga4",
"id": "8710ced67307e436270257fb9439ea01f472c9bc",
"size": "28563",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "deploy.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Go",
"bytes": "19409"
},
{
"name": "Python",
"bytes": "28563"
},
{
"name": "Shell",
"bytes": "755"
}
],
"symlink_target": ""
} |
"""
Server so nltk doesn't initialize for every meme request.
Author: Jack Romo <[email protected]>
"""
from flask import Flask, request
from main import DogeGen
class DogeServer(object):
"""
Server to generate Doge images.
"""
app = Flask(__name__)
dogegen = None
@staticmethod
def run_server(resources, host_name='0.0.0.0', port=5000):
"""
Run the internal server.
Args:
            resources (str): Resources directory, e.g. "./resources".
host_name (str): Host name of server.
port (int): Port of server.
"""
if not DogeServer.dogegen:
DogeServer.dogegen = DogeGen(resources)
DogeServer.app.run(debug=True, host=host_name, port=port)
@staticmethod
@app.route('/makememe')
def makememe():
"""
Make a meme upon a server GET request.
Returns:
str: Directory of image.
"""
inptext = request.args.get('inptext')
outdir = request.args.get('outdir')
maxphrases = int(request.args.get('maxphrases'))
DogeServer.dogegen.make_meme(inptext, outdir, maxphrases)
return outdir
@staticmethod
@app.route('/shutdown', methods=['POST'])
def shutdown():
"""
Shut down server internally.
Returns:
str: Status string.
"""
func = request.environ.get('werkzeug.server.shutdown')
if func is None:
raise RuntimeError('Not running with the Werkzeug Server')
func()
return "Shutting down."
| {
"content_hash": "1619d02667d6a12ae209dd21823089a0",
"timestamp": "",
"source": "github",
"line_count": 62,
"max_line_length": 70,
"avg_line_length": 25.64516129032258,
"alnum_prop": 0.5836477987421383,
"repo_name": "MemeTrash/DogeGen",
"id": "d1e3566418e2e32c2057cb4dd166f0919c1b1b27",
"size": "1590",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "dogegen/server.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "13753"
},
{
"name": "Shell",
"bytes": "422"
}
],
"symlink_target": ""
} |
from schematics.models import Model
from schematics.types import StringType
from .models import Route
class DirectionsResponse(object):
"""
The Directions Response class represents a response.
The two attributes include:
1. Status - Represents the status of the request
2. Routes - Represents an array of routes from origin to destination
"""
def __init__(self, status, routes):
self._status = status
self._routes = routes
@property
def status(self):
return self._status
@status.setter
def status(self, status):
self._status = status
@property
def routes(self):
return self._routes
@routes.setter
def routes(self, routes):
self._routes = routes
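# Example (illustrative; `routes` would normally hold Route instances):
#
#   response = DirectionsResponse(status='OK', routes=[])
#   response.status          # 'OK'
#   response.routes = []     # the setter works the same way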
| {
"content_hash": "7f911e87dd4da8c977cf05c8b44b6b57",
"timestamp": "",
"source": "github",
"line_count": 31,
"max_line_length": 70,
"avg_line_length": 22.096774193548388,
"alnum_prop": 0.7299270072992701,
"repo_name": "apranav19/pydirections",
"id": "9522f7d1f858cbcc2383a0b8d51f87d34de52732",
"size": "685",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pydirections/route_responses.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "11973"
}
],
"symlink_target": ""
} |
"""
Test suite for SocketServer.py.
"""
import contextlib
import imp
import os
import select
import signal
import socket
import tempfile
import unittest
import SocketServer
import test.test_support
from test.test_support import reap_children, reap_threads, verbose
try:
import threading
except ImportError:
threading = None
test.test_support.requires("network")
TEST_STR = "hello world\n"
HOST = test.test_support.HOST
HAVE_UNIX_SOCKETS = hasattr(socket, "AF_UNIX")
HAVE_FORKING = hasattr(os, "fork") and os.name != "os2"
def signal_alarm(n):
"""Call signal.alarm when it exists (i.e. not on Windows)."""
if hasattr(signal, 'alarm'):
signal.alarm(n)
def receive(sock, n, timeout=20):
r, w, x = select.select([sock], [], [], timeout)
if sock in r:
return sock.recv(n)
else:
raise RuntimeError, "timed out on %r" % (sock,)
if HAVE_UNIX_SOCKETS:
class ForkingUnixStreamServer(SocketServer.ForkingMixIn,
SocketServer.UnixStreamServer):
pass
class ForkingUnixDatagramServer(SocketServer.ForkingMixIn,
SocketServer.UnixDatagramServer):
pass
@contextlib.contextmanager
def simple_subprocess(testcase):
pid = os.fork()
if pid == 0:
# Don't throw an exception; it would be caught by the test harness.
os._exit(72)
yield None
pid2, status = os.waitpid(pid, 0)
testcase.assertEqual(pid2, pid)
testcase.assertEqual(72 << 8, status)
@unittest.skipUnless(threading, 'Threading required for this test.')
class SocketServerTest(unittest.TestCase):
"""Test all socket servers."""
def setUp(self):
signal_alarm(20) # Kill deadlocks after 20 seconds.
self.port_seed = 0
self.test_files = []
def tearDown(self):
signal_alarm(0) # Didn't deadlock.
reap_children()
for fn in self.test_files:
try:
os.remove(fn)
except os.error:
pass
self.test_files[:] = []
def pickaddr(self, proto):
if proto == socket.AF_INET:
return (HOST, 0)
else:
# XXX: We need a way to tell AF_UNIX to pick its own name
# like AF_INET provides port==0.
dir = None
if os.name == 'os2':
dir = '\socket'
fn = tempfile.mktemp(prefix='unix_socket.', dir=dir)
if os.name == 'os2':
# AF_UNIX socket names on OS/2 require a specific prefix
# which can't include a drive letter and must also use
# backslashes as directory separators
if fn[1] == ':':
fn = fn[2:]
if fn[0] in (os.sep, os.altsep):
fn = fn[1:]
if os.sep == '/':
fn = fn.replace(os.sep, os.altsep)
else:
fn = fn.replace(os.altsep, os.sep)
self.test_files.append(fn)
return fn
def make_server(self, addr, svrcls, hdlrbase):
class MyServer(svrcls):
def handle_error(self, request, client_address):
self.close_request(request)
self.server_close()
raise
class MyHandler(hdlrbase):
def handle(self):
line = self.rfile.readline()
self.wfile.write(line)
if verbose: print "creating server"
server = MyServer(addr, MyHandler)
self.assertEqual(server.server_address, server.socket.getsockname())
return server
@unittest.skipUnless(threading, 'Threading required for this test.')
@reap_threads
def run_server(self, svrcls, hdlrbase, testfunc):
server = self.make_server(self.pickaddr(svrcls.address_family),
svrcls, hdlrbase)
# We had the OS pick a port, so pull the real address out of
# the server.
addr = server.server_address
if verbose:
print "server created"
print "ADDR =", addr
print "CLASS =", svrcls
t = threading.Thread(
name='%s serving' % svrcls,
target=server.serve_forever,
# Short poll interval to make the test finish quickly.
# Time between requests is short enough that we won't wake
# up spuriously too many times.
kwargs={'poll_interval':0.01})
t.daemon = True # In case this function raises.
t.start()
if verbose: print "server running"
for i in range(3):
if verbose: print "test client", i
testfunc(svrcls.address_family, addr)
if verbose: print "waiting for server"
server.shutdown()
t.join()
if verbose: print "done"
def stream_examine(self, proto, addr):
s = socket.socket(proto, socket.SOCK_STREAM)
s.connect(addr)
s.sendall(TEST_STR)
buf = data = receive(s, 100)
while data and '\n' not in buf:
data = receive(s, 100)
buf += data
self.assertEqual(buf, TEST_STR)
s.close()
def dgram_examine(self, proto, addr):
s = socket.socket(proto, socket.SOCK_DGRAM)
s.sendto(TEST_STR, addr)
buf = data = receive(s, 100)
while data and '\n' not in buf:
data = receive(s, 100)
buf += data
self.assertEqual(buf, TEST_STR)
s.close()
def test_TCPServer(self):
self.run_server(SocketServer.TCPServer,
SocketServer.StreamRequestHandler,
self.stream_examine)
def test_ThreadingTCPServer(self):
self.run_server(SocketServer.ThreadingTCPServer,
SocketServer.StreamRequestHandler,
self.stream_examine)
if HAVE_FORKING:
def test_ForkingTCPServer(self):
with simple_subprocess(self):
self.run_server(SocketServer.ForkingTCPServer,
SocketServer.StreamRequestHandler,
self.stream_examine)
if HAVE_UNIX_SOCKETS:
def test_UnixStreamServer(self):
self.run_server(SocketServer.UnixStreamServer,
SocketServer.StreamRequestHandler,
self.stream_examine)
def test_ThreadingUnixStreamServer(self):
self.run_server(SocketServer.ThreadingUnixStreamServer,
SocketServer.StreamRequestHandler,
self.stream_examine)
if HAVE_FORKING:
def test_ForkingUnixStreamServer(self):
with simple_subprocess(self):
self.run_server(ForkingUnixStreamServer,
SocketServer.StreamRequestHandler,
self.stream_examine)
def test_UDPServer(self):
self.run_server(SocketServer.UDPServer,
SocketServer.DatagramRequestHandler,
self.dgram_examine)
def test_ThreadingUDPServer(self):
self.run_server(SocketServer.ThreadingUDPServer,
SocketServer.DatagramRequestHandler,
self.dgram_examine)
if HAVE_FORKING:
def test_ForkingUDPServer(self):
with simple_subprocess(self):
self.run_server(SocketServer.ForkingUDPServer,
SocketServer.DatagramRequestHandler,
self.dgram_examine)
# Alas, on Linux (at least) recvfrom() doesn't return a meaningful
# client address so this cannot work:
# if HAVE_UNIX_SOCKETS:
# def test_UnixDatagramServer(self):
# self.run_server(SocketServer.UnixDatagramServer,
# SocketServer.DatagramRequestHandler,
# self.dgram_examine)
#
# def test_ThreadingUnixDatagramServer(self):
# self.run_server(SocketServer.ThreadingUnixDatagramServer,
# SocketServer.DatagramRequestHandler,
# self.dgram_examine)
#
# if HAVE_FORKING:
# def test_ForkingUnixDatagramServer(self):
# self.run_server(SocketServer.ForkingUnixDatagramServer,
# SocketServer.DatagramRequestHandler,
# self.dgram_examine)
@reap_threads
def test_shutdown(self):
# Issue #2302: shutdown() should always succeed in making an
# other thread leave serve_forever().
class MyServer(SocketServer.TCPServer):
pass
class MyHandler(SocketServer.StreamRequestHandler):
pass
threads = []
for i in range(20):
s = MyServer((HOST, 0), MyHandler)
t = threading.Thread(
name='MyServer serving',
target=s.serve_forever,
kwargs={'poll_interval':0.01})
t.daemon = True # In case this function raises.
threads.append((t, s))
for t, s in threads:
t.start()
s.shutdown()
for t, s in threads:
t.join()
def test_main():
if imp.lock_held():
# If the import lock is held, the threads will hang
raise unittest.SkipTest("can't run when import lock is held")
test.test_support.run_unittest(SocketServerTest)
if __name__ == "__main__":
test_main()
signal_alarm(3) # Shutdown shouldn't take more than 3 seconds.
| {
"content_hash": "b40d83c1f5bb6485178c94fbae001237",
"timestamp": "",
"source": "github",
"line_count": 284,
"max_line_length": 76,
"avg_line_length": 35.08098591549296,
"alnum_prop": 0.5463213891398173,
"repo_name": "google/google-ctf",
"id": "cd7a70919e09658de38abd89f80a5dc95c8b8d96",
"size": "9963",
"binary": false,
"copies": "7",
"ref": "refs/heads/master",
"path": "third_party/edk2/AppPkg/Applications/Python/Python-2.7.2/Lib/test/test_socketserver.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "AIDL",
"bytes": "508"
},
{
"name": "Assembly",
"bytes": "107617"
},
{
"name": "BASIC",
"bytes": "6068"
},
{
"name": "Batchfile",
"bytes": "1032"
},
{
"name": "Blade",
"bytes": "14530"
},
{
"name": "C",
"bytes": "1481904"
},
{
"name": "C++",
"bytes": "2139472"
},
{
"name": "CMake",
"bytes": "11595"
},
{
"name": "CSS",
"bytes": "172375"
},
{
"name": "Dart",
"bytes": "6282"
},
{
"name": "Dockerfile",
"bytes": "232352"
},
{
"name": "EJS",
"bytes": "92308"
},
{
"name": "Emacs Lisp",
"bytes": "2668"
},
{
"name": "GDB",
"bytes": "273"
},
{
"name": "GLSL",
"bytes": "33392"
},
{
"name": "Go",
"bytes": "3031142"
},
{
"name": "HTML",
"bytes": "467647"
},
{
"name": "Java",
"bytes": "174199"
},
{
"name": "JavaScript",
"bytes": "2643200"
},
{
"name": "Lua",
"bytes": "5944"
},
{
"name": "Makefile",
"bytes": "149152"
},
{
"name": "NSIS",
"bytes": "2800"
},
{
"name": "Nix",
"bytes": "139"
},
{
"name": "PHP",
"bytes": "311900"
},
{
"name": "Perl",
"bytes": "32742"
},
{
"name": "Pug",
"bytes": "8752"
},
{
"name": "Python",
"bytes": "1756592"
},
{
"name": "Red",
"bytes": "188"
},
{
"name": "Rust",
"bytes": "541267"
},
{
"name": "Sage",
"bytes": "39814"
},
{
"name": "Shell",
"bytes": "382149"
},
{
"name": "Smali",
"bytes": "2316656"
},
{
"name": "Starlark",
"bytes": "8216"
},
{
"name": "SystemVerilog",
"bytes": "16466"
},
{
"name": "VCL",
"bytes": "895"
},
{
"name": "Verilog",
"bytes": "7230"
},
{
"name": "Vim Script",
"bytes": "890"
},
{
"name": "Vue",
"bytes": "10248"
}
],
"symlink_target": ""
} |
import functools
import json
import re
from django.conf import settings
from django.contrib.admindocs.views import simplify_regex
from django.core.exceptions import ViewDoesNotExist
from django.core.management.base import BaseCommand, CommandError
try:
from django.urls import RegexURLPattern, RegexURLResolver, LocaleRegexURLResolver
except ImportError:
from django.core.urlresolvers import RegexURLPattern, RegexURLResolver, LocaleRegexURLResolver
from django.utils import translation
from django_extensions.management.color import color_style, no_style
from django_extensions.management.utils import signalcommand
FMTR = {
'dense': "{url}\t{module}\t{url_name}\t{decorator}",
'table': "{url},{module},{url_name},{decorator}",
'aligned': "{url},{module},{url_name},{decorator}",
'verbose': "{url}\n\tController: {module}\n\tURL Name: {url_name}\n\tDecorators: {decorator}\n",
'json': '',
'pretty-json': ''
}
class Command(BaseCommand):
help = "Displays all of the url matching routes for the project."
def add_arguments(self, parser):
super(Command, self).add_arguments(parser)
parser.add_argument(
"--unsorted", "-u", action="store_true", dest="unsorted",
help="Show urls unsorted but same order as found in url patterns")
parser.add_argument(
"--language", "-l", dest="language",
help="Only show this language code (useful for i18n_patterns)")
parser.add_argument(
"--decorator", "-d", action="append", dest="decorator", default=[],
help="Show the presence of given decorator on views")
parser.add_argument(
"--format", "-f", dest="format_style", default="dense",
help="Style of the output. Choices: %s" % FMTR.keys())
parser.add_argument(
"--urlconf", "-c", dest="urlconf", default="ROOT_URLCONF",
help="Set the settings URL conf variable to use")
@signalcommand
def handle(self, *args, **options):
if args:
appname, = args
if options.get('no_color', False):
style = no_style()
else:
style = color_style()
if getattr(settings, 'ADMIN_FOR', None):
settings_modules = [__import__(m, {}, {}, ['']) for m in settings.ADMIN_FOR]
else:
settings_modules = [settings]
self.LANGUAGES = getattr(settings, 'LANGUAGES', ((None, None), ))
language = options.get('language', None)
if language is not None:
translation.activate(language)
self.LANGUAGES = [(code, name) for code, name in self.LANGUAGES if code == language]
decorator = options.get('decorator')
if not decorator:
decorator = ['login_required']
format_style = options.get('format_style')
if format_style not in FMTR:
raise CommandError("Format style '%s' does not exist. Options: %s" % (format_style, FMTR.keys()))
pretty_json = format_style == 'pretty-json'
if pretty_json:
format_style = 'json'
fmtr = FMTR[format_style]
urlconf = options.get('urlconf')
views = []
for settings_mod in settings_modules:
if not hasattr(settings_mod, urlconf):
raise CommandError("Settings module {} does not have the attribute {}.".format(settings_mod, urlconf))
try:
urlconf = __import__(getattr(settings_mod, urlconf), {}, {}, [''])
except Exception as e:
if options.get('traceback', None):
import traceback
traceback.print_exc()
print(style.ERROR("Error occurred while trying to load %s: %s" % (getattr(settings_mod, urlconf), str(e))))
continue
view_functions = self.extract_views_from_urlpatterns(urlconf.urlpatterns)
for (func, regex, url_name) in view_functions:
if hasattr(func, '__globals__'):
func_globals = func.__globals__
elif hasattr(func, 'func_globals'):
func_globals = func.func_globals
else:
func_globals = {}
decorators = [d for d in decorator if d in func_globals]
if isinstance(func, functools.partial):
func = func.func
decorators.insert(0, 'functools.partial')
if hasattr(func, '__name__'):
func_name = func.__name__
elif hasattr(func, '__class__'):
func_name = '%s()' % func.__class__.__name__
else:
func_name = re.sub(r' at 0x[0-9a-f]+', '', repr(func))
module = '{0}.{1}'.format(func.__module__, func_name)
url_name = url_name or ''
url = simplify_regex(regex)
                # Use a separate name so the `decorator` option list is not
                # clobbered on subsequent loop iterations.
                decorators_str = ', '.join(decorators)
                if format_style == 'json':
                    views.append({"url": url, "module": module, "name": url_name, "decorators": decorators_str})
                else:
                    views.append(fmtr.format(
                        module='{0}.{1}'.format(style.MODULE(func.__module__), style.MODULE_NAME(func_name)),
                        url_name=style.URL_NAME(url_name),
                        url=style.URL(url),
                        decorator=decorators_str,
                    ))
if not options.get('unsorted', False) and format_style != 'json':
views = sorted(views)
if format_style == 'aligned':
views = [row.split(',', 3) for row in views]
widths = [len(max(columns, key=len)) for columns in zip(*views)]
views = [
' '.join('{0:<{1}}'.format(cdata, width) for width, cdata in zip(widths, row))
for row in views
]
elif format_style == 'table':
# Reformat all data and show in a table format
views = [row.split(',', 3) for row in views]
widths = [len(max(columns, key=len)) for columns in zip(*views)]
table_views = []
header = (style.MODULE_NAME('URL'), style.MODULE_NAME('Module'), style.MODULE_NAME('Name'), style.MODULE_NAME('Decorator'))
table_views.append(
' | '.join('{0:<{1}}'.format(title, width) for width, title in zip(widths, header))
)
table_views.append('-+-'.join('-' * width for width in widths))
for row in views:
table_views.append(
' | '.join('{0:<{1}}'.format(cdata, width) for width, cdata in zip(widths, row))
)
# Replace original views so we can return the same object
views = table_views
elif format_style == 'json':
if pretty_json:
return json.dumps(views, indent=4)
return json.dumps(views)
return "\n".join([v for v in views]) + "\n"
def extract_views_from_urlpatterns(self, urlpatterns, base='', namespace=None):
"""
Return a list of views from a list of urlpatterns.
Each object in the returned list is a two-tuple: (view_func, regex)
"""
views = []
for p in urlpatterns:
if isinstance(p, RegexURLPattern):
try:
if not p.name:
name = p.name
elif namespace:
name = '{0}:{1}'.format(namespace, p.name)
else:
name = p.name
views.append((p.callback, base + p.regex.pattern, name))
except ViewDoesNotExist:
continue
elif isinstance(p, RegexURLResolver):
try:
patterns = p.url_patterns
except ImportError:
continue
if namespace and p.namespace:
_namespace = '{0}:{1}'.format(namespace, p.namespace)
else:
_namespace = (p.namespace or namespace)
if isinstance(p, LocaleRegexURLResolver):
                    for language in self.LANGUAGES:
                        with translation.override(language[0]):
views.extend(self.extract_views_from_urlpatterns(patterns, base + p.regex.pattern, namespace=_namespace))
else:
views.extend(self.extract_views_from_urlpatterns(patterns, base + p.regex.pattern, namespace=_namespace))
elif hasattr(p, '_get_callback'):
try:
views.append((p._get_callback(), base + p.regex.pattern, p.name))
except ViewDoesNotExist:
continue
elif hasattr(p, 'url_patterns') or hasattr(p, '_get_url_patterns'):
try:
patterns = p.url_patterns
except ImportError:
continue
views.extend(self.extract_views_from_urlpatterns(patterns, base + p.regex.pattern, namespace=namespace))
else:
raise TypeError("%s does not appear to be a urlpattern object" % p)
return views
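# Example invocations (illustrative; the options are defined in add_arguments above):
#
#   python manage.py show_urls
#   python manage.py show_urls --format table
#   python manage.py show_urls -f pretty-json -d login_required --unsorted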
| {
"content_hash": "ad047e7c9b358cf551015fea99474fb3",
"timestamp": "",
"source": "github",
"line_count": 221,
"max_line_length": 135,
"avg_line_length": 42.19457013574661,
"alnum_prop": 0.5376943699731903,
"repo_name": "vicky2135/lucious",
"id": "94382a602e67e65147c422f4e22fb7296d8a19ca",
"size": "9349",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "oscar/lib/python2.7/site-packages/django_extensions/management/commands/show_urls.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "896683"
},
{
"name": "C++",
"bytes": "52230"
},
{
"name": "CSS",
"bytes": "1169533"
},
{
"name": "HTML",
"bytes": "1104983"
},
{
"name": "JavaScript",
"bytes": "1055140"
},
{
"name": "Makefile",
"bytes": "145238"
},
{
"name": "Python",
"bytes": "55993261"
},
{
"name": "Shell",
"bytes": "40487"
}
],
"symlink_target": ""
} |
from twisted.python import versions
version = versions.Version(__name__[:__name__.rfind('.')], 0, 3, 0)
| {
"content_hash": "004abcb83267ae93cda6a89e00a10355",
"timestamp": "",
"source": "github",
"line_count": 2,
"max_line_length": 67,
"avg_line_length": 52,
"alnum_prop": 0.6634615384615384,
"repo_name": "twisted/sine",
"id": "6200cac911ff854915bb836bcddd432a970f2bf2",
"size": "175",
"binary": false,
"copies": "8",
"ref": "refs/heads/master",
"path": "sine/_version.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "379934"
}
],
"symlink_target": ""
} |
from django.db import models
from thing.models.constellation import Constellation
class System(models.Model):
id = models.IntegerField(primary_key=True)
name = models.CharField(max_length=32)
constellation = models.ForeignKey(Constellation)
class Meta:
app_label = 'thing'
        ordering = ('name',)
def __unicode__(self):
return self.name
| {
"content_hash": "eb31f02121baa7423b9705d0cd6ba010",
"timestamp": "",
"source": "github",
"line_count": 17,
"max_line_length": 52,
"avg_line_length": 22.58823529411765,
"alnum_prop": 0.6796875,
"repo_name": "Gillingham/evething",
"id": "e1de60da4a73291a1d3ffd55e0734368f43b5705",
"size": "1904",
"binary": false,
"copies": "2",
"ref": "refs/heads/develop",
"path": "thing/models/system.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "CSS",
"bytes": "283015"
},
{
"name": "HTML",
"bytes": "161685"
},
{
"name": "JavaScript",
"bytes": "359029"
},
{
"name": "Nginx",
"bytes": "1080"
},
{
"name": "Python",
"bytes": "605227"
},
{
"name": "Shell",
"bytes": "475"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
import logging
import os
import six
import yaml
from django.conf import settings
from django.contrib.contenttypes.fields import GenericRelation
from django.db import models
from django_extensions.db.models import (
TimeStampedModel,
TitleSlugDescriptionModel,
)
from salt.version import __version__ as salt_version
from stackdio.api.cloud.providers.base import GroupNotFoundException
from stackdio.api.cloud.utils import get_cloud_provider_choices, get_provider_driver_class
from stackdio.api.formulas.models import FormulaVersion
from stackdio.core.fields import JSONField
from stackdio.core.queryset_transform import TransformQuerySet
from stackdio.core.utils import recursive_update
logger = logging.getLogger(__name__)
FILESYSTEM_CHOICES = (
('ext2', 'ext2'),
('ext3', 'ext3'),
('ext4', 'ext4'),
('fuse', 'fuse'),
('xfs', 'xfs'),
)
_cloudprovider_model_permissions = ()
_cloudprovider_object_permissions = ('view', 'admin')
@six.python_2_unicode_compatible
class CloudProvider(models.Model):
model_permissions = _cloudprovider_model_permissions
object_permissions = _cloudprovider_object_permissions
class Meta:
default_permissions = tuple(set(_cloudprovider_model_permissions +
_cloudprovider_object_permissions))
PROVIDER_CHOICES = get_cloud_provider_choices()
name = models.CharField(
'Name',
max_length=32,
choices=PROVIDER_CHOICES,
unique=True)
def __str__(self):
return six.text_type(self.name)
def get_driver(self):
# determine the provider driver class
provider_class = get_provider_driver_class(self)
# Return an instance of the provider driver
return provider_class()
_cloudaccount_model_permissions = (
'create',
'admin',
)
_cloudaccount_object_permissions = (
'view',
'update',
'delete',
'admin',
)
@six.python_2_unicode_compatible
class CloudAccount(TimeStampedModel, TitleSlugDescriptionModel):
model_permissions = _cloudaccount_model_permissions
object_permissions = _cloudaccount_object_permissions
searchable_fields = ('title', 'description')
class Meta:
ordering = ('title',)
unique_together = ('title', 'provider')
default_permissions = tuple(set(_cloudaccount_model_permissions +
_cloudaccount_object_permissions))
# What is the type of provider (e.g., AWS, Rackspace, etc)
provider = models.ForeignKey('cloud.CloudProvider', verbose_name='Cloud Provider')
    # Used to store the provider-specific YAML that will be written
# to disk in settings.STACKDIO_CONFIG.salt_providers_dir
yaml = models.TextField()
# The region for this provider
# FOR EC2 CLASSIC
region = models.ForeignKey('CloudRegion', verbose_name='Region')
# Are we using VPC?
# FOR EC2 VPC
vpc_id = models.CharField('VPC ID', max_length=64, blank=True)
# If this is false, we won't create security groups on a per-stack basis.
create_security_groups = models.BooleanField('Create Security Groups', default=True)
# Grab the list of formula components
formula_components = GenericRelation('formulas.FormulaComponent')
# Grab the formula versions
formula_versions = GenericRelation('formulas.FormulaVersion')
# Properties for this account
global_orchestration_properties = JSONField('Global Orchestration Properties')
def __str__(self):
return six.text_type(self.title)
@property
def vpc_enabled(self):
return len(self.vpc_id) > 0
def get_driver(self):
# determine the provider driver class
provider_class = get_provider_driver_class(self.provider)
# Return an instance of the provider driver
return provider_class(self)
def get_config_file_path(self):
basedir = settings.STACKDIO_CONFIG.salt_providers_dir
if not os.path.isdir(basedir):
os.makedirs(basedir)
return os.path.join(basedir, '{}.conf'.format(self.slug))
def update_config(self):
"""
Writes the yaml configuration file for the given account object.
"""
# update the account object's security group information
security_groups = [sg.group_id for sg in self.security_groups.filter(
is_default=True
)]
account_yaml = yaml.safe_load(self.yaml)
account_yaml[self.slug]['securitygroupid'] = security_groups
self.yaml = yaml.safe_dump(account_yaml, default_flow_style=False)
self.save()
with open(self.get_config_file_path(), 'w') as f:
# update the yaml to include updated security group information
f.write(self.yaml)
def get_root_directory(self):
return os.path.join(settings.FILE_STORAGE_DIRECTORY, 'cloud', self.slug)
def get_formulas(self):
formulas = set()
for component in self.formula_components.all():
formulas.add(component.formula)
return list(formulas)
def get_full_pillar(self):
pillar_props = {}
# If any of the formulas we're using have default pillar
# data defined in its corresponding SPECFILE, we need to pull
# that into our stack pillar file.
# First get the unique set of formulas
formulas = self.get_formulas()
# for each unique formula, pull the properties from the SPECFILE
for formula in formulas:
# Grab the formula version
try:
version = self.formula_versions.get(formula=formula).version
except FormulaVersion.DoesNotExist:
version = formula.default_version
# Update the formula
formula.get_gitfs().update()
# Add it to the rest of the pillar
recursive_update(pillar_props, formula.properties(version))
# Add in properties that were supplied via the blueprint and during
# stack creation
recursive_update(pillar_props, self.global_orchestration_properties)
return pillar_props
@six.python_2_unicode_compatible
class CloudInstanceSize(TitleSlugDescriptionModel):
class Meta:
ordering = ('id',)
default_permissions = ()
# `title` field will be the type used by salt-cloud for the `size`
# parameter in the providers yaml file (e.g., 'Micro Instance' or
# '512MB Standard Instance'
# link to the type of provider for this instance size
provider = models.ForeignKey('cloud.CloudProvider',
verbose_name='Cloud Provider',
related_name='instance_sizes')
# The underlying size ID of the instance (e.g., t1.micro)
instance_id = models.CharField('Instance ID', max_length=64)
def __str__(self):
return six.text_type('{0} ({1})'.format(self.description, self.instance_id))
_cloudimage_model_permissions = (
'create',
'admin',
)
_cloudimage_object_permissions = (
'view',
'update',
'delete',
'admin',
)
@six.python_2_unicode_compatible
class CloudImage(TimeStampedModel, TitleSlugDescriptionModel):
model_permissions = _cloudimage_model_permissions
object_permissions = _cloudimage_object_permissions
searchable_fields = ('title', 'description')
class Meta:
ordering = ('title',)
unique_together = ('title', 'account')
default_permissions = tuple(set(_cloudimage_model_permissions +
_cloudimage_object_permissions))
# What cloud account is this under?
account = models.ForeignKey('cloud.CloudAccount', related_name='images')
    # The underlying image id of this image (e.g., ami-38df83a)
image_id = models.CharField('Image ID', max_length=64)
# The default instance size of this image, may be overridden
# by the user at creation time
default_instance_size = models.ForeignKey('CloudInstanceSize',
verbose_name='Default Instance Size')
# The SSH user that will have default access to the box. Salt-cloud
# needs this to provision the box as a salt-minion and connect it
# up to the salt-master automatically.
ssh_user = models.CharField('SSH User', max_length=64)
def __str__(self):
return six.text_type(self.title)
def get_config_file_path(self):
basedir = settings.STACKDIO_CONFIG.salt_profiles_dir
if not os.path.isdir(basedir):
os.makedirs(basedir)
return os.path.join(basedir, '{}.conf'.format(self.slug))
def update_config(self):
"""
Writes the salt-cloud profile configuration file
"""
script = settings.STACKDIO_CONFIG.get('salt_bootstrap_script', 'bootstrap-salt')
script_args = settings.STACKDIO_CONFIG.get('salt_bootstrap_args',
'stable archive/{salt_version}')
profile_yaml = {
self.slug: {
'provider': self.account.slug,
'image': self.image_id,
'size': self.default_instance_size.title,
'ssh_username': self.ssh_user,
'script': script,
'script_args': script_args.format(salt_version=salt_version),
'sync_after_install': 'all',
# PI-44: Need to add an empty minion config until salt-cloud/701
# is fixed.
'minion': {},
}
}
profile_yaml = yaml.safe_dump(profile_yaml,
default_flow_style=False)
with open(self.get_config_file_path(), 'w') as f:
            # write the rendered salt-cloud profile for this image
f.write(profile_yaml)
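    # For illustration only, the profile written above for a slug such as
    # 'base-image' would render roughly as follows (all values are
    # placeholders, not taken from a real deployment):
    #
    #   base-image:
    #     provider: my-account
    #     image: ami-12345678
    #     size: Micro Instance
    #     ssh_username: ec2-user
    #     script: bootstrap-salt
    #     script_args: stable archive/<salt_version>
    #     sync_after_install: all
    #     minion: {}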
def get_driver(self):
return self.account.get_driver()
_snapshot_model_permissions = (
'create',
'admin',
)
_snapshot_object_permissions = (
'view',
'update',
'delete',
'admin',
)
@six.python_2_unicode_compatible
class Snapshot(TimeStampedModel, TitleSlugDescriptionModel):
model_permissions = _snapshot_model_permissions
object_permissions = _snapshot_object_permissions
class Meta:
unique_together = ('snapshot_id', 'account')
default_permissions = tuple(set(_snapshot_model_permissions +
_snapshot_object_permissions))
# The cloud account that has access to this snapshot
account = models.ForeignKey('cloud.CloudAccount', related_name='snapshots')
# The snapshot id. Must exist already, be preformatted, and available
# to the associated cloud account
snapshot_id = models.CharField('Snapshot ID', max_length=64)
# the type of file system the volume uses
filesystem_type = models.CharField('Filesystem Type', max_length=16, choices=FILESYSTEM_CHOICES)
def __str__(self):
return six.text_type(self.snapshot_id)
@six.python_2_unicode_compatible
class CloudRegion(TitleSlugDescriptionModel):
class Meta:
unique_together = ('title', 'provider')
ordering = ('provider', 'title')
default_permissions = ()
# link to the type of provider for this zone
provider = models.ForeignKey('cloud.CloudProvider',
verbose_name='Cloud Provider',
related_name='regions')
def __str__(self):
return six.text_type(self.title)
@six.python_2_unicode_compatible
class CloudZone(TitleSlugDescriptionModel):
class Meta:
unique_together = ('title', 'region')
ordering = ('region', 'title')
default_permissions = ()
# link to the region this AZ is in
region = models.ForeignKey('cloud.CloudRegion',
verbose_name='Cloud Region',
related_name='zones')
def __str__(self):
return six.text_type(self.title)
@property
def provider(self):
return self.region.provider
class SecurityGroupQuerySet(TransformQuerySet):
def with_rules(self):
return self.transform(self._inject_rules)
def _inject_rules(self, queryset):
"""
Pull all the security group rules using the cloud account's
implementation.
"""
by_account = {}
for group in queryset:
by_account.setdefault(group.account, []).append(group)
for account, groups in by_account.items():
group_ids = [group.group_id for group in groups]
driver = account.get_driver()
account_groups = driver.get_security_groups(group_ids)
# add in the rules
for group in groups:
group.rules = account_groups[group.name]['rules']
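# Example (illustrative): iterate the default groups with their provider rules
# attached by the transform above.
#
#   for group in SecurityGroup.objects.filter(is_default=True).with_rules():
#       print(group.name, group.rules)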
_securitygroup_model_permissions = (
'create',
'admin',
)
_securitygroup_object_permissions = (
'view',
'update',
'delete',
'admin',
)
@six.python_2_unicode_compatible
class SecurityGroup(TimeStampedModel, models.Model):
model_permissions = _securitygroup_model_permissions
object_permissions = _securitygroup_object_permissions
class Meta:
unique_together = ('name', 'account')
default_permissions = tuple(set(_securitygroup_model_permissions +
                                        _securitygroup_object_permissions))
objects = SecurityGroupQuerySet.as_manager()
# Name of the security group (REQUIRED)
name = models.CharField(max_length=255)
# Description of the security group (REQUIRED)
description = models.CharField(max_length=255)
# ID given by the provider
# NOTE: This will be set automatically after it has been created on the
# account and will be ignored if passed in
group_id = models.CharField(max_length=64)
# The stack that the security group is for (this is only
# useful if it's a managed security group)
stack = models.ForeignKey(
'stacks.Stack',
null=True,
related_name='security_groups'
)
blueprint_host_definition = models.ForeignKey(
'blueprints.BlueprintHostDefinition',
null=True,
default=None,
related_name='security_groups'
)
# the cloud account for this group
account = models.ForeignKey('cloud.CloudAccount', related_name='security_groups')
# ADMIN-ONLY: setting this to true will cause this security group
# to be added automatically to all machines that get started in
# the related cloud account
is_default = models.BooleanField(default=False)
# Flag for us to track which security groups were created by
# stackd.io and should be managed by the system. Any stack
# that is launched will have n security groups created and
# managed, where n is the number of distinct host definitions
# based on the blueprint used to create the stack
is_managed = models.BooleanField(default=False)
def __str__(self):
return six.text_type(self.name)
def get_active_hosts(self):
return self.hosts.count()
def rules(self):
"""
Pulls the security groups using the cloud provider
"""
driver = self.account.get_driver()
groups = driver.get_security_groups([self.group_id])
if len(groups) == 1:
return groups[0].rules
else:
raise GroupNotFoundException('The group with id "{0}" was not '
'found.'.format(self.group_id))
| {
"content_hash": "ce8ada8e54256e6a1a099ef4157e76fb",
"timestamp": "",
"source": "github",
"line_count": 494,
"max_line_length": 100,
"avg_line_length": 31.62348178137652,
"alnum_prop": 0.6373703751120215,
"repo_name": "clarkperkins/stackdio",
"id": "86f5f53495e74cd0ea0b2746162ca3b14ca027e7",
"size": "16233",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "stackdio/api/cloud/models.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "6462"
},
{
"name": "HTML",
"bytes": "200474"
},
{
"name": "JavaScript",
"bytes": "365621"
},
{
"name": "Makefile",
"bytes": "567"
},
{
"name": "Python",
"bytes": "1034237"
},
{
"name": "SaltStack",
"bytes": "4594"
},
{
"name": "Scheme",
"bytes": "2371"
},
{
"name": "Shell",
"bytes": "6131"
}
],
"symlink_target": ""
} |
"""
Extension of chart parsing implementation to handle grammars with
feature structures as nodes.
"""
import yaml
from api import *
from chart import *
from nltk.featstruct import FeatStruct, unify, FeatStructParser
from nltk.sem.logic import SubstituteBindingsI, unique_variable
from nltk import cfg, defaultdict
from nltk.cfg import FeatStructNonterminal
from nltk.internals import Counter
import nltk.data
def load_earley(filename, trace=0, cache=False, verbose=False,
chart_class=Chart):
"""
Load a grammar from a file, and build an Earley feature parser based on
that grammar.
You can optionally specify a tracing level, for how much output you
want to see:
0: No output.
1: Show edges from scanner and completer rules (not predictor).
2 (default): Show all edges as they are added to the chart.
3: Show all edges, plus the results of successful unifications.
4: Show all edges, plus the results of all attempted unifications.
5: Show all edges, plus the results of all attempted unifications,
including those with cached results.
If C{verbose} is set to C{True}, then more diagnostic information about
grammar-loading is displayed.
"""
grammar = nltk.data.load(filename, cache=cache, verbose=verbose)
return FeatureEarleyChartParser(grammar, trace=trace,
chart_class=chart_class)
class FeatureTreeEdge(TreeEdge):
"""
A specialized tree edge that allows shared variable bindings
between nonterminals on the left-hand side and right-hand side.
Each C{FeatureTreeEdge} contains a set of C{bindings}, i.e., a
dictionary mapping from variables to values. If the edge is not
complete, then these bindings are simply stored. However, if the
edge is complete, then the constructor applies these bindings to
every nonterminal in the edge whose symbol implements the
interface L{SubstituteBindingsI}.
"""
def __init__(self, span, lhs, rhs, dot=0, bindings=None):
"""
Construct a new edge. If the edge is incomplete (i.e., if
C{dot<len(rhs)}), then store the bindings as-is. If the edge
is complete (i.e., if C{dot==len(rhs)}), then apply the
bindings to all nonterminals in C{lhs} and C{rhs}, and then
clear the bindings. See L{TreeEdge} for a description of
the other arguments.
"""
if bindings is None: bindings = {}
# If the edge is complete, then substitute in the bindings,
# and then throw them away. (If we didn't throw them away, we
# might think that 2 complete edges are different just because
# they have different bindings, even though all bindings have
# already been applied.)
if dot == len(rhs) and bindings:
lhs = self._bind(lhs, bindings)
rhs = [self._bind(elt, bindings) for elt in rhs]
bindings = {}
# Initialize the edge.
TreeEdge.__init__(self, span, lhs, rhs, dot)
self._bindings = bindings
def _bind(self, nt, bindings):
if not isinstance(nt, FeatStructNonterminal): return nt
return nt.substitute_bindings(bindings)
def next_with_bindings(self):
return self._bind(self.next(), self._bindings)
def bindings(self):
"""
Return a copy of this edge's bindings dictionary.
"""
return self._bindings.copy()
def __str__(self):
if self.is_complete():
return TreeEdge.__str__(self)
else:
bindings = '{%s}' % ', '.join('%s: %r' % item for item in
sorted(self._bindings.items()))
return '%s %s' % (TreeEdge.__str__(self), bindings)
# two edges w/ different bindings are not equal.
def __cmp__(self, other):
if self.__class__ != other.__class__: return -1
return cmp((self._span, self._lhs, self._rhs,
self._dot, self._bindings),
(other._span, other._lhs, other._rhs,
other._dot, other._bindings))
def __hash__(self):
# cache this:?
return hash((self._lhs, self._rhs, self._span, self._dot,
tuple(sorted(self._bindings))))
class FeatureFundamentalRule(FundamentalRule):
"""
A specialized version of the fundamental rule that operates on
nonterminals whose symbols are C{FeatStructNonterminal}s. Rather
    than simply comparing the nonterminals for equality, they are
unified. Variable bindings from these unifications are collected
and stored in the chart using a L{FeatureTreeEdge}. When a
complete edge is generated, these bindings are applied to all
nonterminals in the edge.
The fundamental rule states that:
- [AS{->}S{alpha}*B1S{beta}][i:j]
- [B2S{->}S{gamma}*][j:k]
licenses the edge:
- [AS{->}S{alpha}B3*S{beta}][i:j]
assuming that B1 and B2 can be unified to generate B3.
"""
def apply_iter(self, chart, grammar, left_edge, right_edge):
# Make sure the rule is applicable.
if not (left_edge.end() == right_edge.start() and
left_edge.is_incomplete() and
right_edge.is_complete() and
isinstance(left_edge, TreeEdge) and
isinstance(right_edge, TreeEdge)):
return
# Unify B1 (left_edge.next) with B2 (right_edge.lhs) to
# generate B3 (result).
bindings = left_edge.bindings() # creates a copy.
result = unify(left_edge.next(), right_edge.lhs(),
bindings, rename_vars=False)
if result is None: return
# Construct the new edge.
new_edge = FeatureTreeEdge(span=(left_edge.start(), right_edge.end()),
lhs=left_edge.lhs(), rhs=left_edge.rhs(),
dot=left_edge.dot()+1, bindings=bindings)
# Add it to the chart, with appropriate child pointers.
changed_chart = False
for cpl1 in chart.child_pointer_lists(left_edge):
if chart.insert(new_edge, cpl1+(right_edge,)):
changed_chart = True
# If we changed the chart, then generate the edge.
if changed_chart: yield new_edge
class FeatureTopDownExpandRule(TopDownExpandRule):
"""
A specialized version of the top down expand rule that operates on
nonterminals whose symbols are C{FeatStructNonterminal}s. Rather
    than simply comparing the nonterminals for equality, they are
unified.
The top down expand rule states that:
- [AS{->}S{alpha}*B1S{beta}][i:j]
licenses the edge:
- [B2S{->}*S{gamma}][j:j]
for each grammar production C{B2S{->}S{gamma}}, assuming that B1
and B2 can be unified.
"""
def apply_iter(self, chart, grammar, edge):
if edge.is_complete(): return
for prod in grammar.productions():
# Note: we rename vars here, because we don't want variables
# from the two different productions to match.
if unify(prod.lhs(), edge.next_with_bindings(), rename_vars=True):
new_edge = FeatureTreeEdge(span=(edge.end(), edge.end()),
lhs=prod.lhs(),
rhs=prod.rhs(), dot=0)
if chart.insert(new_edge, ()):
yield new_edge
#////////////////////////////////////////////////////////////
# Earley Parsing Rules
#////////////////////////////////////////////////////////////
class FeatureCompleterRule(CompleterRule):
"""
A specialized version of the completer rule that operates on
nonterminals whose symbols are C{FeatStructNonterminal}s. Rather
    than simply comparing the nonterminals for equality, they are
unified. See L{CompleterRule} for more information.
"""
_fundamental_rule = FeatureFundamentalRule()
def apply_iter(self, chart, grammar, edge1):
fr = self._fundamental_rule
for edge2 in chart.select(end=edge1.start(), is_complete=False):
for new_edge in fr.apply_iter(chart, grammar, edge2, edge1):
yield new_edge
class FeatureScannerRule(ScannerRule):
    def apply_iter(self, chart, grammar, edge):
if edge.is_complete() or edge.end()>=chart.num_leaves(): return
index = edge.end()
leaf = chart.leaf(index)
for pos in self._word_to_pos.get(leaf, []):
if unify(pos, edge.next_with_bindings(), rename_vars=True):
new_leaf_edge = LeafEdge(leaf, index)
if chart.insert(new_leaf_edge, ()):
yield new_leaf_edge
new_pos_edge = FeatureTreeEdge((index, index+1), pos,
[leaf], 1)
if chart.insert(new_pos_edge, (new_leaf_edge,)):
yield new_pos_edge
# This is just another name for TopDownExpandRule:
class FeaturePredictorRule(FeatureTopDownExpandRule): pass
#////////////////////////////////////////////////////////////
# Earley Parser
#////////////////////////////////////////////////////////////
## Simple Earley Chart Parser, without features
## (defined here because the feature version needs to build on it, but
## chart.py has a simpler way to use the Earley algorithm)
class EarleyChartParser(ParserI):
"""
A chart parser implementing the Earley parsing algorithm:
- For each index I{end} in [0, 1, ..., N]:
- For each I{edge} s.t. I{edge}.end = I{end}:
- If I{edge} is incomplete, and I{edge}.next is not a part
of speech:
- Apply PredictorRule to I{edge}
- If I{edge} is incomplete, and I{edge}.next is a part of
speech:
- Apply ScannerRule to I{edge}
- If I{edge} is complete:
- Apply CompleterRule to I{edge}
- Return any complete parses in the chart
C{EarleyChartParser} uses a X{lexicon} to decide whether a leaf
has a given part of speech. This lexicon is encoded as a
dictionary that maps each word to a list of parts of speech that
word can have.
@ivar _predictor_class, _completer_class, _scanner_class: The
classes that are used to implement the three rules used by the
    Earley algorithm. Replacement rules can be specified by
subclasses (such as L{FeatureEarleyChartParser
<nltk.parse.featurechar.FeatureEarleyChartParser>}).
"""
_predictor_class = PredictorRule
_completer_class = CompleterRule
_scanner_class = ScannerRule
def __init__(self, grammar, trace=0, chart_class=Chart):
"""
Create a new Earley chart parser, that uses C{grammar} to
parse texts.
@type grammar: C{cfg.Grammar}
@param grammar: The grammar used to parse texts.
#@type lexicon: C{dict} from C{string} to (C{list} of C{string})
#@param lexicon: A lexicon of words that records the parts of
#speech that each word can have. Each key is a word, and
#the corresponding value is a list of parts of speech.
@type trace: C{int}
@param trace: The level of tracing that should be used when
parsing a text. C{0} will generate no tracing output;
and higher numbers will produce more verbose tracing
output.
@param chart_class: The class that should be used to create
the charts used by this parser.
"""
if isinstance(trace, dict):
raise ValueError("Earley parser no longer takes a lexicon "
"as a separate parameter; assign the "
"lexicon when creating the grammar instead.")
self._grammar = grammar
self._lexicon = grammar.lexicon()
self._trace = trace
self._chart_class = chart_class
def grammar(self):
return self._grammar
def lexicon(self):
"""@return: The lexicon used by this parser."""
return self._lexicon
#: The default total width reserved for the chart in trace output.
#: The remainder of each line will be used to display edges.
_trace_chart_width = 40
def nbest_parse(self, tokens, n=None, tree_class=Tree):
tokens = list(tokens)
self._check_lexicon_coverage(tokens)
chart = self._chart_class(tokens)
grammar = self._grammar
# Width, for printing trace edges.
w = max(2, self._trace_chart_width/(chart.num_leaves()+1))
if self._trace > 0: print ' '*9, chart.pp_leaves(w)
# Initialize the chart with a special "starter" edge.
chart.insert(self._starter_edge(grammar.start()), ())
# Create the 3 rules:
predictor = self._predictor_class()
completer = self._completer_class()
scanner = self._scanner_class(self._lexicon)
for end in range(chart.num_leaves()+1):
if self._trace > 1: print 'Processing queue %d' % end
for edge in chart.select(end=end):
if edge.is_complete():
for e in completer.apply_iter(chart, grammar, edge):
if self._trace > 0:
print 'Completer', chart.pp_edge(e,w)
if edge.is_incomplete():
for e in predictor.apply_iter(chart, grammar, edge):
if self._trace > 1:
print 'Predictor', chart.pp_edge(e,w)
if edge.is_incomplete():
for e in scanner.apply_iter(chart, grammar, edge):
if self._trace > 0:
print 'Scanner ', chart.pp_edge(e,w)
# Output a list of complete parses.
return self._parses(chart, grammar.start(), tree_class)[:n]
# This is a separate method because FeatureEarleyChartParser needs
# to replace it:
def _starter_edge(self, start_sym):
"""Return a 'starter edge' that expands to the start symbol."""
root = cfg.Nonterminal('[INIT]')
return TreeEdge((0,0), root, (start_sym,))
# This is a separate method because FeatureEarleyChartParser needs
# to replace it:
def _parses(self, chart, start_sym, tree_class):
"""Return a list of parses in the given chart."""
return chart.parses(start_sym, tree_class=tree_class)
def _check_lexicon_coverage(self, tokens):
try: 'x' in self._lexicon
except: raise ValueError('ow %r' % self._lexicon)
missing = [tok for tok in tokens if tok not in self._lexicon]
if missing:
missing = ', '.join('%r' % (w,) for w in missing)
raise ValueError("Grammar does not cover some of the "
"input words: " + missing)
class FeatureEarleyChartParser(EarleyChartParser):
"""
A chart parser implementing the Earley parsing algorithm, allowing
nonterminals that have features (known as L{FeatStructNonterminal}s).
See L{EarleyChartParser} for more details.
"""
_predictor_class = FeaturePredictorRule
_completer_class = FeatureCompleterRule
_scanner_class = FeatureScannerRule
_trace_chart_width = 10 # Edges are big, so compress the chart.
def _starter_edge(self, start):
root = FeatStructNonterminal('[*type*="[INIT]"]')
return FeatureTreeEdge((0,0), root, (start,), 0)
def _parses(self, chart, start, tree_class):
# Output a list of complete parses.
trees = []
for edge in chart.select(span=(0, chart.num_leaves())):
if unify(edge.lhs(), start, rename_vars=True):
trees += chart.trees(edge, complete=True,
tree_class=tree_class)
return trees
#////////////////////////////////////////////////////////////
# Instantiate Variable Chart
#////////////////////////////////////////////////////////////
class InstantiateVarsChart(Chart):
"""
A specialized chart that 'instantiates' variables whose names
start with '@', by replacing them with unique new variables.
In particular, whenever a complete edge is added to the chart, any
variables in the edge's C{lhs} whose names start with '@' will be
replaced by unique new L{IndVariable}s.
"""
def __init__(self, tokens):
Chart.__init__(self, tokens)
self._instantiated = set()
def insert(self, edge, child_pointer_list):
if edge in self._instantiated: return False
edge = self.instantiate_edge(edge)
return Chart.insert(self, edge, child_pointer_list)
def instantiate_edge(self, edge):
# If the edge is a leaf, or is not complete, or is
# already in the chart, then just return it as-is.
if not isinstance(edge, FeatureTreeEdge): return edge
if not edge.is_complete(): return edge
if edge in self._edge_to_cpls: return edge
# Get a list of variables that need to be instantiated.
# If there are none, then return the edge as-is.
inst_vars = self.inst_vars(edge)
if not inst_vars: return edge
# Instantiate the edge!
self._instantiated.add(edge)
lhs = edge.lhs().substitute_bindings(inst_vars)
return FeatureTreeEdge(edge.span(), lhs, edge.rhs(),
edge.dot(), edge.bindings())
counter = Counter(100)
def inst_vars(self, edge):
return dict((var, unique_variable(self.counter).variable)
for var in edge.lhs().variables()
if var.name.startswith('@'))
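# Illustrative sketch (not part of the original module): the renaming idea
# behind InstantiateVarsChart, applied to plain strings.  It assumes only
# that names beginning with '@' should be replaced consistently (within one
# call) by fresh names; the real chart draws its fresh variables from a
# shared Counter so they stay unique across the whole parse.
def _sketch_instantiate(names):
    fresh = {}
    out = []
    for name in names:
        if name.startswith('@'):
            if name not in fresh:
                fresh[name] = 'x%d' % (len(fresh) + 1)
            out.append(fresh[name])
        else:
            out.append(name)
    return out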
#////////////////////////////////////////////////////////////
# Demo
#////////////////////////////////////////////////////////////
# TODO: update to use grammar parser
def demo():
import sys, time
S = FeatStructNonterminal('S')
VP = FeatStructNonterminal('VP')
NP = FeatStructNonterminal('NP')
PP = FeatStructNonterminal('PP')
V = FeatStructNonterminal('V')
N = FeatStructNonterminal('N')
P = FeatStructNonterminal('P')
Name = FeatStructNonterminal('Name')
Det = FeatStructNonterminal('Det')
DetSg = FeatStructNonterminal('Det[-pl]')
DetPl = FeatStructNonterminal('Det[+pl]')
NSg = FeatStructNonterminal('N[-pl]')
NPl = FeatStructNonterminal('N[+pl]')
# Define some grammatical productions.
grammatical_productions = [
cfg.Production(S, (NP, VP)), cfg.Production(PP, (P, NP)),
cfg.Production(NP, (NP, PP)),
cfg.Production(VP, (VP, PP)), cfg.Production(VP, (V, NP)),
cfg.Production(VP, (V,)), cfg.Production(NP, (DetPl, NPl)),
cfg.Production(NP, (DetSg, NSg))]
# Define some lexical productions.
lexical_productions = [
cfg.Production(NP, ('John',)), cfg.Production(NP, ('I',)),
cfg.Production(Det, ('the',)), cfg.Production(Det, ('my',)),
cfg.Production(Det, ('a',)),
cfg.Production(NSg, ('dog',)), cfg.Production(NSg, ('cookie',)),
cfg.Production(V, ('ate',)), cfg.Production(V, ('saw',)),
cfg.Production(P, ('with',)), cfg.Production(P, ('under',)),
]
earley_lexicon = defaultdict(list)
for prod in lexical_productions:
earley_lexicon[prod.rhs()[0]].append(prod.lhs())
#print "Lexicon:"
#print earley_lexicon
earley_grammar = cfg.Grammar(S, grammatical_productions, earley_lexicon)
print earley_grammar
sent = 'I saw John with a dog with my cookie'
print "Sentence:\n", sent
tokens = sent.split()
t = time.time()
cp = FeatureEarleyChartParser(earley_grammar, trace=1)
trees = cp.nbest_parse(tokens)
print "Time: %s" % (time.time() - t)
for tree in trees: print tree
def run_profile():
import profile
profile.run('for i in range(1): demo()', '/tmp/profile.out')
import pstats
p = pstats.Stats('/tmp/profile.out')
p.strip_dirs().sort_stats('time', 'cum').print_stats(60)
p.strip_dirs().sort_stats('cum', 'time').print_stats(60)
if __name__ == '__main__':
demo()
print
cp = load_earley('grammars/feat0.fcfg', trace=2)
sent = 'Kim likes children'
tokens = sent.split()
trees = cp.nbest_parse(tokens)
for tree in trees:
print tree
| {
"content_hash": "e1d6e0b2c37b392aba69d1e577e96599",
"timestamp": "",
"source": "github",
"line_count": 503,
"max_line_length": 78,
"avg_line_length": 40.636182902584494,
"alnum_prop": 0.5936399217221136,
"repo_name": "hectormartinez/rougexstem",
"id": "38f1390771599ab5afccf60e66d7bd4b22ac4c7b",
"size": "20735",
"binary": false,
"copies": "9",
"ref": "refs/heads/master",
"path": "taln2016/icsisumm-primary-sys34_v1/nltk/nltk-0.9.2/nltk/parse/featurechart.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "AMPL",
"bytes": "252646"
},
{
"name": "Batchfile",
"bytes": "2712"
},
{
"name": "C",
"bytes": "3446743"
},
{
"name": "C#",
"bytes": "3511"
},
{
"name": "CSS",
"bytes": "1240"
},
{
"name": "HTML",
"bytes": "315849"
},
{
"name": "M4",
"bytes": "4099"
},
{
"name": "Makefile",
"bytes": "199393"
},
{
"name": "Perl",
"bytes": "378641"
},
{
"name": "Perl6",
"bytes": "67212"
},
{
"name": "Python",
"bytes": "3712683"
},
{
"name": "Shell",
"bytes": "319340"
},
{
"name": "TeX",
"bytes": "536677"
},
{
"name": "XQuery",
"bytes": "5987"
},
{
"name": "XS",
"bytes": "45555"
}
],
"symlink_target": ""
} |
import os, sys, time, struct, uuid, re
from . import root, ThriftTester
from . import thrift_client as client
from thrift.Thrift import TApplicationException
from ttypes import *
from constants import VERSION
def _i64(n):
return struct.pack('>q', n) # big endian = network order
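# Worked example (illustrative, not used by the tests): the packing is
# big-endian, so numeric order and byte-wise order agree, e.g.
#   _i64(1) == '\x00\x00\x00\x00\x00\x00\x00\x01'
#   _i64(4) <  _i64(5)
# which is why LongType column names inserted below stay in numeric order.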
_SIMPLE_COLUMNS = [Column('c1', 'value1', 0),
Column('c2', 'value2', 0)]
_SUPER_COLUMNS = [SuperColumn(name='sc1', columns=[Column(_i64(4), 'value4', 0)]),
SuperColumn(name='sc2', columns=[Column(_i64(5), 'value5', 0),
Column(_i64(6), 'value6', 0)])]
def _assert_column(column_family, key, column, value, ts = 0):
try:
assert client.get(key, ColumnPath(column_family, column=column), ConsistencyLevel.ONE).column == Column(column, value, ts)
except NotFoundException:
raise Exception('expected %s:%s:%s:%s, but was not present' % (column_family, key, column, value) )
def _assert_columnpath_exists(key, column_path):
try:
assert client.get(key, column_path, ConsistencyLevel.ONE)
except NotFoundException:
raise Exception('expected %s with %s but was not present.' % (key, column_path) )
def _assert_no_columnpath(key, column_path):
try:
client.get(key, column_path, ConsistencyLevel.ONE)
assert False, ('columnpath %s existed in %s when it should not' % (column_path, key))
except NotFoundException:
assert True, 'column did not exist'
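# Typical use of the three assertion helpers above (illustrative only):
#   _assert_column('Standard1', 'key1', 'c1', 'value1')
#   _assert_columnpath_exists('key1', ColumnPath('Standard1', column='c1'))
#   _assert_no_columnpath('key1', ColumnPath('Standard1', column='absent'))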
def _insert_simple(block=True):
return _insert_multi(['key1'])
def _insert_batch(block):
return _insert_multi_batch(['key1'], block)
def _insert_multi(keys):
CL = ConsistencyLevel.ONE
for key in keys:
client.insert(key, ColumnParent('Standard1'), Column('c1', 'value1', 0), CL)
client.insert(key, ColumnParent('Standard1'), Column('c2', 'value2', 0), CL)
def _insert_multi_batch(keys, block):
cfmap = {'Standard1': [Mutation(ColumnOrSuperColumn(c)) for c in _SIMPLE_COLUMNS],
'Standard2': [Mutation(ColumnOrSuperColumn(c)) for c in _SIMPLE_COLUMNS]}
for key in keys:
client.batch_mutate({key: cfmap}, ConsistencyLevel.ONE)
def _big_slice(key, column_parent):
p = SlicePredicate(slice_range=SliceRange('', '', False, 1000))
return client.get_slice(key, column_parent, p, ConsistencyLevel.ONE)
def _big_multislice(keys, column_parent):
p = SlicePredicate(slice_range=SliceRange('', '', False, 1000))
return client.multiget_slice(keys, column_parent, p, ConsistencyLevel.ONE)
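# Note (illustrative): SliceRange('', '', False, 1000) asks for every column
# name from the start of the row to the end, in ascending order, capped at
# 1000 columns, so the two helpers above simply fetch whole rows.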
def _verify_batch():
_verify_simple()
L = [result.column
for result in _big_slice('key1', ColumnParent('Standard2'))]
assert L == _SIMPLE_COLUMNS, L
def _verify_simple():
assert client.get('key1', ColumnPath('Standard1', column='c1'), ConsistencyLevel.ONE).column == Column('c1', 'value1', 0)
L = [result.column
for result in _big_slice('key1', ColumnParent('Standard1'))]
assert L == _SIMPLE_COLUMNS, L
def _insert_super(key='key1'):
client.insert(key, ColumnParent('Super1', 'sc1'), Column(_i64(4), 'value4', 0), ConsistencyLevel.ONE)
client.insert(key, ColumnParent('Super1', 'sc2'), Column(_i64(5), 'value5', 0), ConsistencyLevel.ONE)
client.insert(key, ColumnParent('Super1', 'sc2'), Column(_i64(6), 'value6', 0), ConsistencyLevel.ONE)
time.sleep(0.1)
def _insert_range():
client.insert('key1', ColumnParent('Standard1'), Column('c1', 'value1', 0), ConsistencyLevel.ONE)
client.insert('key1', ColumnParent('Standard1'), Column('c2', 'value2', 0), ConsistencyLevel.ONE)
client.insert('key1', ColumnParent('Standard1'), Column('c3', 'value3', 0), ConsistencyLevel.ONE)
time.sleep(0.1)
def _insert_counter_range():
client.add('key1', ColumnParent('Counter1'), CounterColumn('c1', 1), ConsistencyLevel.ONE)
client.add('key1', ColumnParent('Counter1'), CounterColumn('c2', 2), ConsistencyLevel.ONE)
client.add('key1', ColumnParent('Counter1'), CounterColumn('c3', 3), ConsistencyLevel.ONE)
time.sleep(0.1)
def _verify_range():
p = SlicePredicate(slice_range=SliceRange('c1', 'c2', False, 1000))
result = client.get_slice('key1', ColumnParent('Standard1'), p, ConsistencyLevel.ONE)
assert len(result) == 2
assert result[0].column.name == 'c1'
assert result[1].column.name == 'c2'
p = SlicePredicate(slice_range=SliceRange('c3', 'c2', True, 1000))
result = client.get_slice('key1', ColumnParent('Standard1'), p, ConsistencyLevel.ONE)
assert len(result) == 2
assert result[0].column.name == 'c3'
assert result[1].column.name == 'c2'
p = SlicePredicate(slice_range=SliceRange('a', 'z', False, 1000))
result = client.get_slice('key1', ColumnParent('Standard1'), p, ConsistencyLevel.ONE)
assert len(result) == 3, result
p = SlicePredicate(slice_range=SliceRange('a', 'z', False, 2))
result = client.get_slice('key1', ColumnParent('Standard1'), p, ConsistencyLevel.ONE)
assert len(result) == 2, result
def _verify_counter_range():
p = SlicePredicate(slice_range=SliceRange('c1', 'c2', False, 1000))
result = client.get_slice('key1', ColumnParent('Counter1'), p, ConsistencyLevel.ONE)
assert len(result) == 2
assert result[0].counter_column.name == 'c1'
assert result[1].counter_column.name == 'c2'
p = SlicePredicate(slice_range=SliceRange('c3', 'c2', True, 1000))
result = client.get_slice('key1', ColumnParent('Counter1'), p, ConsistencyLevel.ONE)
assert len(result) == 2
assert result[0].counter_column.name == 'c3'
assert result[1].counter_column.name == 'c2'
p = SlicePredicate(slice_range=SliceRange('a', 'z', False, 1000))
result = client.get_slice('key1', ColumnParent('Counter1'), p, ConsistencyLevel.ONE)
assert len(result) == 3, result
p = SlicePredicate(slice_range=SliceRange('a', 'z', False, 2))
result = client.get_slice('key1', ColumnParent('Counter1'), p, ConsistencyLevel.ONE)
assert len(result) == 2, result
def _set_keyspace(keyspace):
client.set_keyspace(keyspace)
def _insert_super_range():
client.insert('key1', ColumnParent('Super1', 'sc1'), Column(_i64(4), 'value4', 0), ConsistencyLevel.ONE)
client.insert('key1', ColumnParent('Super1', 'sc2'), Column(_i64(5), 'value5', 0), ConsistencyLevel.ONE)
client.insert('key1', ColumnParent('Super1', 'sc2'), Column(_i64(6), 'value6', 0), ConsistencyLevel.ONE)
client.insert('key1', ColumnParent('Super1', 'sc3'), Column(_i64(7), 'value7', 0), ConsistencyLevel.ONE)
time.sleep(0.1)
def _insert_counter_super_range():
client.add('key1', ColumnParent('SuperCounter1', 'sc1'), CounterColumn(_i64(4), 4), ConsistencyLevel.ONE)
client.add('key1', ColumnParent('SuperCounter1', 'sc2'), CounterColumn(_i64(5), 5), ConsistencyLevel.ONE)
client.add('key1', ColumnParent('SuperCounter1', 'sc2'), CounterColumn(_i64(6), 6), ConsistencyLevel.ONE)
client.add('key1', ColumnParent('SuperCounter1', 'sc3'), CounterColumn(_i64(7), 7), ConsistencyLevel.ONE)
time.sleep(0.1)
def _verify_super_range():
p = SlicePredicate(slice_range=SliceRange('sc2', 'sc3', False, 2))
result = client.get_slice('key1', ColumnParent('Super1'), p, ConsistencyLevel.ONE)
assert len(result) == 2
assert result[0].super_column.name == 'sc2'
assert result[1].super_column.name == 'sc3'
p = SlicePredicate(slice_range=SliceRange('sc3', 'sc2', True, 2))
result = client.get_slice('key1', ColumnParent('Super1'), p, ConsistencyLevel.ONE)
assert len(result) == 2
assert result[0].super_column.name == 'sc3'
assert result[1].super_column.name == 'sc2'
def _verify_counter_super_range():
p = SlicePredicate(slice_range=SliceRange('sc2', 'sc3', False, 2))
result = client.get_slice('key1', ColumnParent('SuperCounter1'), p, ConsistencyLevel.ONE)
assert len(result) == 2
assert result[0].counter_super_column.name == 'sc2'
assert result[1].counter_super_column.name == 'sc3'
p = SlicePredicate(slice_range=SliceRange('sc3', 'sc2', True, 2))
result = client.get_slice('key1', ColumnParent('SuperCounter1'), p, ConsistencyLevel.ONE)
assert len(result) == 2
assert result[0].counter_super_column.name == 'sc3'
assert result[1].counter_super_column.name == 'sc2'
def _verify_super(supercf='Super1', key='key1'):
assert client.get(key, ColumnPath(supercf, 'sc1', _i64(4)), ConsistencyLevel.ONE).column == Column(_i64(4), 'value4', 0)
slice = [result.super_column
             for result in _big_slice(key, ColumnParent(supercf))]
assert slice == _SUPER_COLUMNS, slice
def _expect_exception(fn, type_):
try:
r = fn()
except type_, t:
return t
else:
raise Exception('expected %s; got %s' % (type_.__name__, r))
def _expect_missing(fn):
_expect_exception(fn, NotFoundException)
def get_range_slice(client, parent, predicate, start, end, count, cl, row_filter=None):
kr = KeyRange(start, end, count=count, row_filter=row_filter)
return client.get_range_slices(parent, predicate, kr, cl)
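# For example (illustrative only), fetching at most 5 rows between 'key2' and
# 'key4' for two named columns looks like:
#   get_range_slice(client, ColumnParent('Standard1'),
#                   SlicePredicate(column_names=['col1', 'col3']),
#                   'key2', 'key4', 5, ConsistencyLevel.ONE)
# i.e. the helper just saves constructing the KeyRange by hand.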
class TestMutations(ThriftTester):
def test_insert(self):
_set_keyspace('Keyspace1')
_insert_simple(False)
time.sleep(0.1)
_verify_simple()
def test_empty_slice(self):
_set_keyspace('Keyspace1')
assert _big_slice('key1', ColumnParent('Standard2')) == []
assert _big_slice('key1', ColumnParent('Super1')) == []
def test_cas(self):
_set_keyspace('Keyspace1')
assert not client.cas('key1', 'Standard1', _SIMPLE_COLUMNS, _SIMPLE_COLUMNS)
assert client.cas('key1', 'Standard1', None, _SIMPLE_COLUMNS)
result = [cosc.column for cosc in _big_slice('key1', ColumnParent('Standard1'))]
# CAS will use its own timestamp, so we can't just compare result == _SIMPLE_COLUMNS
assert dict((c.name, c.value) for c in result) == dict((c.name, c.value) for c in _SIMPLE_COLUMNS), result
assert not client.cas('key1', 'Standard1', None, _SIMPLE_COLUMNS)
def test_missing_super(self):
_set_keyspace('Keyspace1')
_expect_missing(lambda: client.get('key1', ColumnPath('Super1', 'sc1', _i64(1)), ConsistencyLevel.ONE))
_insert_super()
_expect_missing(lambda: client.get('key1', ColumnPath('Super1', 'sc1', _i64(1)), ConsistencyLevel.ONE))
def test_count(self):
_set_keyspace('Keyspace1')
_insert_simple()
_insert_super()
p = SlicePredicate(slice_range=SliceRange('', '', False, 1000))
assert client.get_count('key1', ColumnParent('Standard2'), p, ConsistencyLevel.ONE) == 0
assert client.get_count('key1', ColumnParent('Standard1'), p, ConsistencyLevel.ONE) == 2
assert client.get_count('key1', ColumnParent('Super1', 'sc2'), p, ConsistencyLevel.ONE) == 2
assert client.get_count('key1', ColumnParent('Super1'), p, ConsistencyLevel.ONE) == 2
# Let's make that a little more interesting
client.insert('key1', ColumnParent('Standard1'), Column('c3', 'value3', 0), ConsistencyLevel.ONE)
client.insert('key1', ColumnParent('Standard1'), Column('c4', 'value4', 0), ConsistencyLevel.ONE)
client.insert('key1', ColumnParent('Standard1'), Column('c5', 'value5', 0), ConsistencyLevel.ONE)
p = SlicePredicate(slice_range=SliceRange('c2', 'c4', False, 1000))
assert client.get_count('key1', ColumnParent('Standard1'), p, ConsistencyLevel.ONE) == 3
def test_count_paging(self):
_set_keyspace('Keyspace1')
_insert_simple()
# Exercise paging
column_parent = ColumnParent('Standard1')
super_column_parent = ColumnParent('Super1', 'sc3')
# Paging for small columns starts at 1024 columns
columns_to_insert = [Column('c%d' % (i,), 'value%d' % (i,), 0) for i in xrange(3, 1026)]
cfmap = {'Standard1': [Mutation(ColumnOrSuperColumn(c)) for c in columns_to_insert]}
client.batch_mutate({'key1' : cfmap }, ConsistencyLevel.ONE)
p = SlicePredicate(slice_range=SliceRange('', '', False, 2000))
assert client.get_count('key1', column_parent, p, ConsistencyLevel.ONE) == 1025
# Ensure that the count limit isn't clobbered
p = SlicePredicate(slice_range=SliceRange('', '', False, 10))
assert client.get_count('key1', ColumnParent('Standard1'), p, ConsistencyLevel.ONE) == 10
def test_insert_blocking(self):
_set_keyspace('Keyspace1')
_insert_simple()
_verify_simple()
def test_super_insert(self):
_set_keyspace('Keyspace1')
_insert_super()
_verify_super()
def test_super_get(self):
_set_keyspace('Keyspace1')
_insert_super()
result = client.get('key1', ColumnPath('Super1', 'sc2'), ConsistencyLevel.ONE).super_column
assert result == _SUPER_COLUMNS[1], result
def test_super_subcolumn_limit(self):
_set_keyspace('Keyspace1')
_insert_super()
p = SlicePredicate(slice_range=SliceRange('', '', False, 1))
column_parent = ColumnParent('Super1', 'sc2')
slice = [result.column
for result in client.get_slice('key1', column_parent, p, ConsistencyLevel.ONE)]
assert slice == [Column(_i64(5), 'value5', 0)], slice
p = SlicePredicate(slice_range=SliceRange('', '', True, 1))
slice = [result.column
for result in client.get_slice('key1', column_parent, p, ConsistencyLevel.ONE)]
assert slice == [Column(_i64(6), 'value6', 0)], slice
def test_long_order(self):
_set_keyspace('Keyspace1')
def long_xrange(start, stop, step):
i = start
while i < stop:
yield i
i += step
L = []
for i in long_xrange(0, 104294967296, 429496729):
name = _i64(i)
client.insert('key1', ColumnParent('StandardLong1'), Column(name, 'v', 0), ConsistencyLevel.ONE)
L.append(name)
slice = [result.column.name for result in _big_slice('key1', ColumnParent('StandardLong1'))]
assert slice == L, slice
def test_integer_order(self):
_set_keyspace('Keyspace1')
def long_xrange(start, stop, step):
i = start
while i >= stop:
yield i
i -= step
L = []
for i in long_xrange(104294967296, 0, 429496729):
name = _i64(i)
client.insert('key1', ColumnParent('StandardInteger1'), Column(name, 'v', 0), ConsistencyLevel.ONE)
L.append(name)
slice = [result.column.name for result in _big_slice('key1', ColumnParent('StandardInteger1'))]
L.sort()
assert slice == L, slice
def test_time_uuid(self):
import uuid
L = []
_set_keyspace('Keyspace2')
# 100 isn't enough to fail reliably if the comparator is borked
for i in xrange(500):
L.append(uuid.uuid1())
client.insert('key1', ColumnParent('Super4', 'sc1'), Column(L[-1].bytes, 'value%s' % i, i), ConsistencyLevel.ONE)
slice = _big_slice('key1', ColumnParent('Super4', 'sc1'))
assert len(slice) == 500, len(slice)
for i in xrange(500):
u = slice[i].column
assert u.value == 'value%s' % i
assert u.name == L[i].bytes
p = SlicePredicate(slice_range=SliceRange('', '', True, 1))
column_parent = ColumnParent('Super4', 'sc1')
slice = [result.column
for result in client.get_slice('key1', column_parent, p, ConsistencyLevel.ONE)]
assert slice == [Column(L[-1].bytes, 'value499', 499)], slice
p = SlicePredicate(slice_range=SliceRange('', L[2].bytes, False, 1000))
column_parent = ColumnParent('Super4', 'sc1')
slice = [result.column
for result in client.get_slice('key1', column_parent, p, ConsistencyLevel.ONE)]
assert slice == [Column(L[0].bytes, 'value0', 0),
Column(L[1].bytes, 'value1', 1),
Column(L[2].bytes, 'value2', 2)], slice
p = SlicePredicate(slice_range=SliceRange(L[2].bytes, '', True, 1000))
column_parent = ColumnParent('Super4', 'sc1')
slice = [result.column
for result in client.get_slice('key1', column_parent, p, ConsistencyLevel.ONE)]
assert slice == [Column(L[2].bytes, 'value2', 2),
Column(L[1].bytes, 'value1', 1),
Column(L[0].bytes, 'value0', 0)], slice
p = SlicePredicate(slice_range=SliceRange(L[2].bytes, '', False, 1))
column_parent = ColumnParent('Super4', 'sc1')
slice = [result.column
for result in client.get_slice('key1', column_parent, p, ConsistencyLevel.ONE)]
assert slice == [Column(L[2].bytes, 'value2', 2)], slice
def test_long_remove(self):
column_parent = ColumnParent('StandardLong1')
sp = SlicePredicate(slice_range=SliceRange('', '', False, 1))
_set_keyspace('Keyspace1')
for i in xrange(10):
parent = ColumnParent('StandardLong1')
client.insert('key1', parent, Column(_i64(i), 'value1', 10 * i), ConsistencyLevel.ONE)
client.remove('key1', ColumnPath('StandardLong1'), 10 * i + 1, ConsistencyLevel.ONE)
slice = client.get_slice('key1', column_parent, sp, ConsistencyLevel.ONE)
assert slice == [], slice
# resurrect
client.insert('key1', parent, Column(_i64(i), 'value2', 10 * i + 2), ConsistencyLevel.ONE)
slice = [result.column
for result in client.get_slice('key1', column_parent, sp, ConsistencyLevel.ONE)]
assert slice == [Column(_i64(i), 'value2', 10 * i + 2)], (slice, i)
def test_integer_remove(self):
column_parent = ColumnParent('StandardInteger1')
sp = SlicePredicate(slice_range=SliceRange('', '', False, 1))
_set_keyspace('Keyspace1')
for i in xrange(10):
parent = ColumnParent('StandardInteger1')
client.insert('key1', parent, Column(_i64(i), 'value1', 10 * i), ConsistencyLevel.ONE)
client.remove('key1', ColumnPath('StandardInteger1'), 10 * i + 1, ConsistencyLevel.ONE)
slice = client.get_slice('key1', column_parent, sp, ConsistencyLevel.ONE)
assert slice == [], slice
# resurrect
client.insert('key1', parent, Column(_i64(i), 'value2', 10 * i + 2), ConsistencyLevel.ONE)
slice = [result.column
for result in client.get_slice('key1', column_parent, sp, ConsistencyLevel.ONE)]
assert slice == [Column(_i64(i), 'value2', 10 * i + 2)], (slice, i)
def test_batch_insert(self):
_set_keyspace('Keyspace1')
_insert_batch(False)
time.sleep(0.1)
_verify_batch()
def test_batch_insert_blocking(self):
_set_keyspace('Keyspace1')
_insert_batch(True)
_verify_batch()
def test_batch_mutate_standard_columns(self):
_set_keyspace('Keyspace1')
column_families = ['Standard1', 'Standard2']
keys = ['key_%d' % i for i in range(27,32)]
mutations = [Mutation(ColumnOrSuperColumn(c)) for c in _SIMPLE_COLUMNS]
mutation_map = dict((column_family, mutations) for column_family in column_families)
keyed_mutations = dict((key, mutation_map) for key in keys)
client.batch_mutate(keyed_mutations, ConsistencyLevel.ONE)
for column_family in column_families:
for key in keys:
_assert_column(column_family, key, 'c1', 'value1')
def test_batch_mutate_standard_columns_blocking(self):
_set_keyspace('Keyspace1')
column_families = ['Standard1', 'Standard2']
keys = ['key_%d' % i for i in range(38,46)]
mutations = [Mutation(ColumnOrSuperColumn(c)) for c in _SIMPLE_COLUMNS]
mutation_map = dict((column_family, mutations) for column_family in column_families)
keyed_mutations = dict((key, mutation_map) for key in keys)
client.batch_mutate(keyed_mutations, ConsistencyLevel.ONE)
for column_family in column_families:
for key in keys:
_assert_column(column_family, key, 'c1', 'value1')
def test_batch_mutate_remove_standard_columns(self):
_set_keyspace('Keyspace1')
column_families = ['Standard1', 'Standard2']
keys = ['key_%d' % i for i in range(11,21)]
_insert_multi(keys)
mutations = [Mutation(deletion=Deletion(20, predicate=SlicePredicate(column_names=[c.name]))) for c in _SIMPLE_COLUMNS]
mutation_map = dict((column_family, mutations) for column_family in column_families)
keyed_mutations = dict((key, mutation_map) for key in keys)
client.batch_mutate(keyed_mutations, ConsistencyLevel.ONE)
for column_family in column_families:
for c in _SIMPLE_COLUMNS:
for key in keys:
_assert_no_columnpath(key, ColumnPath(column_family, column=c.name))
def test_batch_mutate_remove_standard_row(self):
_set_keyspace('Keyspace1')
column_families = ['Standard1', 'Standard2']
keys = ['key_%d' % i for i in range(11,21)]
_insert_multi(keys)
mutations = [Mutation(deletion=Deletion(20))]
mutation_map = dict((column_family, mutations) for column_family in column_families)
keyed_mutations = dict((key, mutation_map) for key in keys)
client.batch_mutate(keyed_mutations, ConsistencyLevel.ONE)
for column_family in column_families:
for c in _SIMPLE_COLUMNS:
for key in keys:
_assert_no_columnpath(key, ColumnPath(column_family, column=c.name))
def test_batch_mutate_remove_super_columns_with_standard_under(self):
_set_keyspace('Keyspace1')
column_families = ['Super1', 'Super2']
keys = ['key_%d' % i for i in range(11,21)]
_insert_super()
mutations = []
for sc in _SUPER_COLUMNS:
names = []
for c in sc.columns:
names.append(c.name)
mutations.append(Mutation(deletion=Deletion(20, super_column=c.name, predicate=SlicePredicate(column_names=names))))
mutation_map = dict((column_family, mutations) for column_family in column_families)
keyed_mutations = dict((key, mutation_map) for key in keys)
client.batch_mutate(keyed_mutations, ConsistencyLevel.ONE)
for column_family in column_families:
for sc in _SUPER_COLUMNS:
for c in sc.columns:
for key in keys:
_assert_no_columnpath(key, ColumnPath(column_family, super_column=sc.name, column=c.name))
def test_batch_mutate_remove_super_columns_with_none_given_underneath(self):
_set_keyspace('Keyspace1')
keys = ['key_%d' % i for i in range(17,21)]
for key in keys:
_insert_super(key)
mutations = []
for sc in _SUPER_COLUMNS:
mutations.append(Mutation(deletion=Deletion(20,
super_column=sc.name)))
mutation_map = {'Super1': mutations}
keyed_mutations = dict((key, mutation_map) for key in keys)
# Sanity check
for sc in _SUPER_COLUMNS:
for key in keys:
_assert_columnpath_exists(key, ColumnPath('Super1', super_column=sc.name))
client.batch_mutate(keyed_mutations, ConsistencyLevel.ONE)
for sc in _SUPER_COLUMNS:
for c in sc.columns:
for key in keys:
_assert_no_columnpath(key, ColumnPath('Super1', super_column=sc.name))
def test_batch_mutate_remove_super_columns_entire_row(self):
_set_keyspace('Keyspace1')
keys = ['key_%d' % i for i in range(17,21)]
for key in keys:
_insert_super(key)
mutations = []
mutations.append(Mutation(deletion=Deletion(20)))
mutation_map = {'Super1': mutations}
keyed_mutations = dict((key, mutation_map) for key in keys)
# Sanity check
for sc in _SUPER_COLUMNS:
for key in keys:
_assert_columnpath_exists(key, ColumnPath('Super1', super_column=sc.name))
client.batch_mutate(keyed_mutations, ConsistencyLevel.ONE)
for sc in _SUPER_COLUMNS:
for key in keys:
_assert_no_columnpath(key, ColumnPath('Super1', super_column=sc.name))
def test_batch_mutate_insertions_and_deletions(self):
_set_keyspace('Keyspace1')
first_insert = SuperColumn("sc1",
columns=[Column(_i64(20), 'value20', 3),
Column(_i64(21), 'value21', 3)])
second_insert = SuperColumn("sc1",
columns=[Column(_i64(20), 'value20', 3),
Column(_i64(21), 'value21', 3)])
first_deletion = {'super_column': "sc1",
'predicate': SlicePredicate(column_names=[_i64(22), _i64(23)])}
second_deletion = {'super_column': "sc2",
'predicate': SlicePredicate(column_names=[_i64(22), _i64(23)])}
keys = ['key_30', 'key_31']
for key in keys:
sc = SuperColumn('sc1',[Column(_i64(22), 'value22', 0),
Column(_i64(23), 'value23', 0)])
cfmap = {'Super1': [Mutation(ColumnOrSuperColumn(super_column=sc))]}
client.batch_mutate({key: cfmap}, ConsistencyLevel.ONE)
sc2 = SuperColumn('sc2', [Column(_i64(22), 'value22', 0),
Column(_i64(23), 'value23', 0)])
cfmap2 = {'Super2': [Mutation(ColumnOrSuperColumn(super_column=sc2))]}
client.batch_mutate({key: cfmap2}, ConsistencyLevel.ONE)
cfmap3 = {
'Super1' : [Mutation(ColumnOrSuperColumn(super_column=first_insert)),
Mutation(deletion=Deletion(3, **first_deletion))],
'Super2' : [Mutation(deletion=Deletion(2, **second_deletion)),
Mutation(ColumnOrSuperColumn(super_column=second_insert))]
}
keyed_mutations = dict((key, cfmap3) for key in keys)
client.batch_mutate(keyed_mutations, ConsistencyLevel.ONE)
for key in keys:
for c in [_i64(22), _i64(23)]:
_assert_no_columnpath(key, ColumnPath('Super1', super_column='sc1', column=c))
_assert_no_columnpath(key, ColumnPath('Super2', super_column='sc2', column=c))
for c in [_i64(20), _i64(21)]:
_assert_columnpath_exists(key, ColumnPath('Super1', super_column='sc1', column=c))
_assert_columnpath_exists(key, ColumnPath('Super2', super_column='sc1', column=c))
def test_bad_system_calls(self):
def duplicate_index_names():
_set_keyspace('Keyspace1')
cd1 = ColumnDef('foo', 'BytesType', IndexType.KEYS, 'i')
cd2 = ColumnDef('bar', 'BytesType', IndexType.KEYS, 'i')
cf = CfDef('Keyspace1', 'BadCF', column_metadata=[cd1, cd2])
client.system_add_column_family(cf)
_expect_exception(duplicate_index_names, InvalidRequestException)
def test_bad_batch_calls(self):
# mutate_does_not_accept_cosc_and_deletion_in_same_mutation
def too_full():
_set_keyspace('Keyspace1')
col = ColumnOrSuperColumn(column=Column("foo", 'bar', 0))
dele = Deletion(2, predicate=SlicePredicate(column_names=['baz']))
client.batch_mutate({'key_34': {'Standard1': [Mutation(col, dele)]}},
ConsistencyLevel.ONE)
_expect_exception(too_full, InvalidRequestException)
# test_batch_mutate_does_not_yet_accept_slice_ranges
def send_range():
_set_keyspace('Keyspace1')
sp = SlicePredicate(slice_range=SliceRange(start='0', finish="", count=10))
d = Deletion(2, predicate=sp)
client.batch_mutate({'key_35': {'Standard1':[Mutation(deletion=d)]}},
ConsistencyLevel.ONE)
_expect_exception(send_range, InvalidRequestException)
# test_batch_mutate_does_not_accept_cosc_on_undefined_cf:
def bad_cf():
_set_keyspace('Keyspace1')
col = ColumnOrSuperColumn(column=Column("foo", 'bar', 0))
client.batch_mutate({'key_36': {'Undefined': [Mutation(col)]}},
ConsistencyLevel.ONE)
_expect_exception(bad_cf, InvalidRequestException)
# test_batch_mutate_does_not_accept_deletion_on_undefined_cf
def bad_cf():
_set_keyspace('Keyspace1')
d = Deletion(2, predicate=SlicePredicate(column_names=['baz']))
client.batch_mutate({'key_37': {'Undefined':[Mutation(deletion=d)]}},
ConsistencyLevel.ONE)
_expect_exception(bad_cf, InvalidRequestException)
# a column value that does not match the declared validator
def send_string_instead_of_long():
_set_keyspace('Keyspace1')
col = ColumnOrSuperColumn(column=Column('birthdate', 'bar', 0))
client.batch_mutate({'key_38': {'Indexed1': [Mutation(col)]}},
ConsistencyLevel.ONE)
_expect_exception(send_string_instead_of_long, InvalidRequestException)
def test_column_name_lengths(self):
_set_keyspace('Keyspace1')
_expect_exception(lambda: client.insert('key1', ColumnParent('Standard1'), Column('', 'value', 0), ConsistencyLevel.ONE), InvalidRequestException)
client.insert('key1', ColumnParent('Standard1'), Column('x'*1, 'value', 0), ConsistencyLevel.ONE)
client.insert('key1', ColumnParent('Standard1'), Column('x'*127, 'value', 0), ConsistencyLevel.ONE)
client.insert('key1', ColumnParent('Standard1'), Column('x'*128, 'value', 0), ConsistencyLevel.ONE)
client.insert('key1', ColumnParent('Standard1'), Column('x'*129, 'value', 0), ConsistencyLevel.ONE)
client.insert('key1', ColumnParent('Standard1'), Column('x'*255, 'value', 0), ConsistencyLevel.ONE)
client.insert('key1', ColumnParent('Standard1'), Column('x'*256, 'value', 0), ConsistencyLevel.ONE)
client.insert('key1', ColumnParent('Standard1'), Column('x'*257, 'value', 0), ConsistencyLevel.ONE)
client.insert('key1', ColumnParent('Standard1'), Column('x'*(2**16 - 1), 'value', 0), ConsistencyLevel.ONE)
_expect_exception(lambda: client.insert('key1', ColumnParent('Standard1'), Column('x'*(2**16), 'value', 0), ConsistencyLevel.ONE), InvalidRequestException)
def test_bad_calls(self):
_set_keyspace('Keyspace1')
# missing arguments
_expect_exception(lambda: client.insert(None, None, None, None), TApplicationException)
# supercolumn in a non-super CF
_expect_exception(lambda: client.insert('key1', ColumnParent('Standard1', 'x'), Column('y', 'value', 0), ConsistencyLevel.ONE), InvalidRequestException)
# no supercolumn in a super CF
_expect_exception(lambda: client.insert('key1', ColumnParent('Super1'), Column('y', 'value', 0), ConsistencyLevel.ONE), InvalidRequestException)
# column but no supercolumn in remove
_expect_exception(lambda: client.remove('key1', ColumnPath('Super1', column='x'), 0, ConsistencyLevel.ONE), InvalidRequestException)
# super column in non-super CF
_expect_exception(lambda: client.remove('key1', ColumnPath('Standard1', 'y', 'x'), 0, ConsistencyLevel.ONE), InvalidRequestException)
# key too long
_expect_exception(lambda: client.get('x' * 2**16, ColumnPath('Standard1', column='c1'), ConsistencyLevel.ONE), InvalidRequestException)
# empty key
_expect_exception(lambda: client.get('', ColumnPath('Standard1', column='c1'), ConsistencyLevel.ONE), InvalidRequestException)
cfmap = {'Super1': [Mutation(ColumnOrSuperColumn(super_column=c)) for c in _SUPER_COLUMNS],
'Super2': [Mutation(ColumnOrSuperColumn(super_column=c)) for c in _SUPER_COLUMNS]}
_expect_exception(lambda: client.batch_mutate({'': cfmap}, ConsistencyLevel.ONE), InvalidRequestException)
# empty column name
_expect_exception(lambda: client.get('key1', ColumnPath('Standard1', column=''), ConsistencyLevel.ONE), InvalidRequestException)
# get doesn't specify column name
_expect_exception(lambda: client.get('key1', ColumnPath('Standard1'), ConsistencyLevel.ONE), InvalidRequestException)
# supercolumn in a non-super CF
_expect_exception(lambda: client.get('key1', ColumnPath('Standard1', 'x', 'y'), ConsistencyLevel.ONE), InvalidRequestException)
# get doesn't specify supercolumn name
_expect_exception(lambda: client.get('key1', ColumnPath('Super1'), ConsistencyLevel.ONE), InvalidRequestException)
# invalid CF
_expect_exception(lambda: get_range_slice(client, ColumnParent('S'), SlicePredicate(column_names=['', '']), '', '', 5, ConsistencyLevel.ONE), InvalidRequestException)
# 'x' is not a valid Long
_expect_exception(lambda: client.insert('key1', ColumnParent('Super1', 'sc1'), Column('x', 'value', 0), ConsistencyLevel.ONE), InvalidRequestException)
# start is not a valid Long
p = SlicePredicate(slice_range=SliceRange('x', '', False, 1))
column_parent = ColumnParent('StandardLong1')
_expect_exception(lambda: client.get_slice('key1', column_parent, p, ConsistencyLevel.ONE),
InvalidRequestException)
# start > finish
p = SlicePredicate(slice_range=SliceRange(_i64(10), _i64(0), False, 1))
column_parent = ColumnParent('StandardLong1')
_expect_exception(lambda: client.get_slice('key1', column_parent, p, ConsistencyLevel.ONE),
InvalidRequestException)
# start is not a valid Long, supercolumn version
p = SlicePredicate(slice_range=SliceRange('x', '', False, 1))
column_parent = ColumnParent('Super1', 'sc1')
_expect_exception(lambda: client.get_slice('key1', column_parent, p, ConsistencyLevel.ONE),
InvalidRequestException)
# start > finish, supercolumn version
p = SlicePredicate(slice_range=SliceRange(_i64(10), _i64(0), False, 1))
column_parent = ColumnParent('Super1', 'sc1')
_expect_exception(lambda: client.get_slice('key1', column_parent, p, ConsistencyLevel.ONE),
InvalidRequestException)
# start > finish, key version
_expect_exception(lambda: get_range_slice(client, ColumnParent('Standard1'), SlicePredicate(column_names=['']), 'z', 'a', 1, ConsistencyLevel.ONE), InvalidRequestException)
# ttl must be positive
column = Column('cttl1', 'value1', 0, 0)
_expect_exception(lambda: client.insert('key1', ColumnParent('Standard1'), column, ConsistencyLevel.ONE),
InvalidRequestException)
# don't allow super_column in Deletion for standard ColumnFamily
deletion = Deletion(1, 'supercolumn', None)
mutation = Mutation(deletion=deletion)
mutations = {'key' : {'Standard1' : [mutation]}}
_expect_exception(lambda: client.batch_mutate(mutations, ConsistencyLevel.QUORUM),
InvalidRequestException)
# 'x' is not a valid long
deletion = Deletion(1, 'x', None)
mutation = Mutation(deletion=deletion)
mutations = {'key' : {'Super5' : [mutation]}}
_expect_exception(lambda: client.batch_mutate(mutations, ConsistencyLevel.QUORUM), InvalidRequestException)
# counters don't support ANY
_expect_exception(lambda: client.add('key1', ColumnParent('Counter1', 'x'), CounterColumn('y', 1), ConsistencyLevel.ANY), InvalidRequestException)
def test_batch_insert_super(self):
_set_keyspace('Keyspace1')
cfmap = {'Super1': [Mutation(ColumnOrSuperColumn(super_column=c))
for c in _SUPER_COLUMNS],
'Super2': [Mutation(ColumnOrSuperColumn(super_column=c))
for c in _SUPER_COLUMNS]}
client.batch_mutate({'key1': cfmap}, ConsistencyLevel.ONE)
_verify_super('Super1')
_verify_super('Super2')
def test_batch_insert_super_blocking(self):
_set_keyspace('Keyspace1')
cfmap = {'Super1': [Mutation(ColumnOrSuperColumn(super_column=c))
for c in _SUPER_COLUMNS],
'Super2': [Mutation(ColumnOrSuperColumn(super_column=c))
for c in _SUPER_COLUMNS]}
client.batch_mutate({'key1': cfmap}, ConsistencyLevel.ONE)
_verify_super('Super1')
_verify_super('Super2')
def test_cf_remove_column(self):
_set_keyspace('Keyspace1')
_insert_simple()
client.remove('key1', ColumnPath('Standard1', column='c1'), 1, ConsistencyLevel.ONE)
_expect_missing(lambda: client.get('key1', ColumnPath('Standard1', column='c1'), ConsistencyLevel.ONE))
assert client.get('key1', ColumnPath('Standard1', column='c2'), ConsistencyLevel.ONE).column \
== Column('c2', 'value2', 0)
assert _big_slice('key1', ColumnParent('Standard1')) \
== [ColumnOrSuperColumn(column=Column('c2', 'value2', 0))]
# New insert, make sure it shows up post-remove:
client.insert('key1', ColumnParent('Standard1'), Column('c3', 'value3', 0), ConsistencyLevel.ONE)
columns = [result.column
for result in _big_slice('key1', ColumnParent('Standard1'))]
assert columns == [Column('c2', 'value2', 0), Column('c3', 'value3', 0)], columns
# Test resurrection. First, re-insert the value w/ older timestamp,
# and make sure it stays removed
client.insert('key1', ColumnParent('Standard1'), Column('c1', 'value1', 0), ConsistencyLevel.ONE)
columns = [result.column
for result in _big_slice('key1', ColumnParent('Standard1'))]
assert columns == [Column('c2', 'value2', 0), Column('c3', 'value3', 0)], columns
# Next, w/ a newer timestamp; it should come back:
client.insert('key1', ColumnParent('Standard1'), Column('c1', 'value1', 2), ConsistencyLevel.ONE)
columns = [result.column
for result in _big_slice('key1', ColumnParent('Standard1'))]
assert columns == [Column('c1', 'value1', 2), Column('c2', 'value2', 0), Column('c3', 'value3', 0)], columns
def test_cf_remove(self):
_set_keyspace('Keyspace1')
_insert_simple()
_insert_super()
# Remove the key1:Standard1 cf; verify super is unaffected
client.remove('key1', ColumnPath('Standard1'), 3, ConsistencyLevel.ONE)
assert _big_slice('key1', ColumnParent('Standard1')) == []
_verify_super()
# Test resurrection. First, re-insert a value w/ older timestamp,
# and make sure it stays removed:
client.insert('key1', ColumnParent('Standard1'), Column('c1', 'value1', 0), ConsistencyLevel.ONE)
assert _big_slice('key1', ColumnParent('Standard1')) == []
# Next, w/ a newer timestamp; it should come back:
client.insert('key1', ColumnParent('Standard1'), Column('c1', 'value1', 4), ConsistencyLevel.ONE)
result = _big_slice('key1', ColumnParent('Standard1'))
assert result == [ColumnOrSuperColumn(column=Column('c1', 'value1', 4))], result
# check removing the entire super cf, too.
client.remove('key1', ColumnPath('Super1'), 3, ConsistencyLevel.ONE)
assert _big_slice('key1', ColumnParent('Super1')) == []
assert _big_slice('key1', ColumnParent('Super1', 'sc1')) == []
def test_super_cf_remove_and_range_slice(self):
_set_keyspace('Keyspace1')
client.insert('key3', ColumnParent('Super1', 'sc1'), Column(_i64(1), 'v1', 0), ConsistencyLevel.ONE)
client.remove('key3', ColumnPath('Super1', 'sc1'), 5, ConsistencyLevel.ONE)
rows = {}
for row in get_range_slice(client, ColumnParent('Super1'), SlicePredicate(slice_range=SliceRange('', '', False, 1000)), '', '', 1000, ConsistencyLevel.ONE):
scs = [cosc.super_column for cosc in row.columns]
rows[row.key] = scs
assert rows == {'key3': []}, rows
def test_super_cf_remove_column(self):
_set_keyspace('Keyspace1')
_insert_simple()
_insert_super()
# Make sure remove clears out what it's supposed to, and _only_ that:
client.remove('key1', ColumnPath('Super1', 'sc2', _i64(5)), 5, ConsistencyLevel.ONE)
_expect_missing(lambda: client.get('key1', ColumnPath('Super1', 'sc2', _i64(5)), ConsistencyLevel.ONE))
super_columns = [result.super_column for result in _big_slice('key1', ColumnParent('Super1'))]
assert super_columns == [SuperColumn(name='sc1', columns=[Column(_i64(4), 'value4', 0)]),
SuperColumn(name='sc2', columns=[Column(_i64(6), 'value6', 0)])]
_verify_simple()
# New insert, make sure it shows up post-remove:
client.insert('key1', ColumnParent('Super1', 'sc2'), Column(_i64(7), 'value7', 0), ConsistencyLevel.ONE)
super_columns_expected = [SuperColumn(name='sc1',
columns=[Column(_i64(4), 'value4', 0)]),
SuperColumn(name='sc2',
columns=[Column(_i64(6), 'value6', 0), Column(_i64(7), 'value7', 0)])]
super_columns = [result.super_column for result in _big_slice('key1', ColumnParent('Super1'))]
        assert super_columns == super_columns_expected, super_columns
# Test resurrection. First, re-insert the value w/ older timestamp,
# and make sure it stays removed:
client.insert('key1', ColumnParent('Super1', 'sc2'), Column(_i64(5), 'value5', 0), ConsistencyLevel.ONE)
super_columns = [result.super_column for result in _big_slice('key1', ColumnParent('Super1'))]
assert super_columns == super_columns_expected, super_columns
# Next, w/ a newer timestamp; it should come back
client.insert('key1', ColumnParent('Super1', 'sc2'), Column(_i64(5), 'value5', 6), ConsistencyLevel.ONE)
super_columns = [result.super_column for result in _big_slice('key1', ColumnParent('Super1'))]
super_columns_expected = [SuperColumn(name='sc1', columns=[Column(_i64(4), 'value4', 0)]),
SuperColumn(name='sc2', columns=[Column(_i64(5), 'value5', 6),
Column(_i64(6), 'value6', 0),
Column(_i64(7), 'value7', 0)])]
assert super_columns == super_columns_expected, super_columns
# shouldn't be able to specify a column w/o a super column for remove
cp = ColumnPath(column_family='Super1', column='sc2')
e = _expect_exception(lambda: client.remove('key1', cp, 5, ConsistencyLevel.ONE), InvalidRequestException)
assert e.why.find("column cannot be specified without") >= 0
def test_super_cf_remove_supercolumn(self):
_set_keyspace('Keyspace1')
_insert_simple()
_insert_super()
# Make sure remove clears out what it's supposed to, and _only_ that:
client.remove('key1', ColumnPath('Super1', 'sc2'), 5, ConsistencyLevel.ONE)
_expect_missing(lambda: client.get('key1', ColumnPath('Super1', 'sc2', _i64(5)), ConsistencyLevel.ONE))
super_columns = _big_slice('key1', ColumnParent('Super1', 'sc2'))
assert super_columns == [], super_columns
super_columns_expected = [SuperColumn(name='sc1', columns=[Column(_i64(4), 'value4', 0)])]
super_columns = [result.super_column
for result in _big_slice('key1', ColumnParent('Super1'))]
assert super_columns == super_columns_expected, super_columns
_verify_simple()
# Test resurrection. First, re-insert the value w/ older timestamp,
# and make sure it stays removed:
client.insert('key1', ColumnParent('Super1', 'sc2'), Column(_i64(5), 'value5', 1), ConsistencyLevel.ONE)
super_columns = [result.super_column
for result in _big_slice('key1', ColumnParent('Super1'))]
assert super_columns == super_columns_expected, super_columns
# Next, w/ a newer timestamp; it should come back
client.insert('key1', ColumnParent('Super1', 'sc2'), Column(_i64(5), 'value5', 6), ConsistencyLevel.ONE)
super_columns = [result.super_column
for result in _big_slice('key1', ColumnParent('Super1'))]
super_columns_expected = [SuperColumn(name='sc1', columns=[Column(_i64(4), 'value4', 0)]),
SuperColumn(name='sc2', columns=[Column(_i64(5), 'value5', 6)])]
assert super_columns == super_columns_expected, super_columns
# check slicing at the subcolumn level too
p = SlicePredicate(slice_range=SliceRange('', '', False, 1000))
columns = [result.column
for result in client.get_slice('key1', ColumnParent('Super1', 'sc2'), p, ConsistencyLevel.ONE)]
assert columns == [Column(_i64(5), 'value5', 6)], columns
def test_super_cf_resurrect_subcolumn(self):
_set_keyspace('Keyspace1')
key = 'vijay'
client.insert(key, ColumnParent('Super1', 'sc1'), Column(_i64(4), 'value4', 0), ConsistencyLevel.ONE)
client.remove(key, ColumnPath('Super1', 'sc1'), 1, ConsistencyLevel.ONE)
client.insert(key, ColumnParent('Super1', 'sc1'), Column(_i64(4), 'value4', 2), ConsistencyLevel.ONE)
result = client.get(key, ColumnPath('Super1', 'sc1'), ConsistencyLevel.ONE)
assert result.super_column.columns is not None, result.super_column
def test_empty_range(self):
_set_keyspace('Keyspace1')
assert get_range_slice(client, ColumnParent('Standard1'), SlicePredicate(column_names=['c1', 'c1']), '', '', 1000, ConsistencyLevel.ONE) == []
_insert_simple()
assert get_range_slice(client, ColumnParent('Super1'), SlicePredicate(column_names=['c1', 'c1']), '', '', 1000, ConsistencyLevel.ONE) == []
def test_range_with_remove(self):
_set_keyspace('Keyspace1')
_insert_simple()
assert get_range_slice(client, ColumnParent('Standard1'), SlicePredicate(column_names=['c1', 'c1']), 'key1', '', 1000, ConsistencyLevel.ONE)[0].key == 'key1'
client.remove('key1', ColumnPath('Standard1', column='c1'), 1, ConsistencyLevel.ONE)
client.remove('key1', ColumnPath('Standard1', column='c2'), 1, ConsistencyLevel.ONE)
actual = get_range_slice(client, ColumnParent('Standard1'), SlicePredicate(column_names=['c1', 'c2']), '', '', 1000, ConsistencyLevel.ONE)
assert actual == [KeySlice(columns=[], key='key1')], actual
def test_range_with_remove_cf(self):
_set_keyspace('Keyspace1')
_insert_simple()
assert get_range_slice(client, ColumnParent('Standard1'), SlicePredicate(column_names=['c1', 'c1']), 'key1', '', 1000, ConsistencyLevel.ONE)[0].key == 'key1'
client.remove('key1', ColumnPath('Standard1'), 1, ConsistencyLevel.ONE)
actual = get_range_slice(client, ColumnParent('Standard1'), SlicePredicate(column_names=['c1', 'c1']), '', '', 1000, ConsistencyLevel.ONE)
assert actual == [KeySlice(columns=[], key='key1')], actual
def test_range_collation(self):
_set_keyspace('Keyspace1')
for key in ['-a', '-b', 'a', 'b'] + [str(i) for i in xrange(100)]:
client.insert(key, ColumnParent('Standard1'), Column(key, 'v', 0), ConsistencyLevel.ONE)
slices = get_range_slice(client, ColumnParent('Standard1'), SlicePredicate(column_names=['-a', '-a']), '', '', 1000, ConsistencyLevel.ONE)
# note the collated ordering rather than ascii
L = ['0', '1', '10', '11', '12', '13', '14', '15', '16', '17', '18', '19', '2', '20', '21', '22', '23', '24', '25', '26', '27','28', '29', '3', '30', '31', '32', '33', '34', '35', '36', '37', '38', '39', '4', '40', '41', '42', '43', '44', '45', '46', '47', '48', '49', '5', '50', '51', '52', '53', '54', '55', '56', '57', '58', '59', '6', '60', '61', '62', '63', '64', '65', '66', '67', '68', '69', '7', '70', '71', '72', '73', '74', '75', '76', '77', '78', '79', '8', '80', '81', '82', '83', '84', '85', '86', '87', '88', '89', '9', '90', '91', '92', '93', '94', '95', '96', '97', '98', '99', 'a', '-a', 'b', '-b']
assert len(slices) == len(L)
for key, ks in zip(L, slices):
assert key == ks.key
def test_range_partial(self):
_set_keyspace('Keyspace1')
for key in ['-a', '-b', 'a', 'b'] + [str(i) for i in xrange(100)]:
client.insert(key, ColumnParent('Standard1'), Column(key, 'v', 0), ConsistencyLevel.ONE)
def check_slices_against_keys(keyList, sliceList):
assert len(keyList) == len(sliceList), "%d vs %d" % (len(keyList), len(sliceList))
for key, ks in zip(keyList, sliceList):
assert key == ks.key
slices = get_range_slice(client, ColumnParent('Standard1'), SlicePredicate(column_names=['-a', '-a']), 'a', '', 1000, ConsistencyLevel.ONE)
check_slices_against_keys(['a', '-a', 'b', '-b'], slices)
slices = get_range_slice(client, ColumnParent('Standard1'), SlicePredicate(column_names=['-a', '-a']), '', '15', 1000, ConsistencyLevel.ONE)
check_slices_against_keys(['0', '1', '10', '11', '12', '13', '14', '15'], slices)
slices = get_range_slice(client, ColumnParent('Standard1'), SlicePredicate(column_names=['-a', '-a']), '50', '51', 1000, ConsistencyLevel.ONE)
check_slices_against_keys(['50', '51'], slices)
slices = get_range_slice(client, ColumnParent('Standard1'), SlicePredicate(column_names=['-a', '-a']), '1', '', 10, ConsistencyLevel.ONE)
check_slices_against_keys(['1', '10', '11', '12', '13', '14', '15', '16', '17', '18'], slices)
def test_get_slice_range(self):
_set_keyspace('Keyspace1')
_insert_range()
_verify_range()
def test_get_slice_super_range(self):
_set_keyspace('Keyspace1')
_insert_super_range()
_verify_super_range()
def test_get_range_slices_tokens(self):
_set_keyspace('Keyspace2')
for key in ['key1', 'key2', 'key3', 'key4', 'key5']:
for cname in ['col1', 'col2', 'col3', 'col4', 'col5']:
client.insert(key, ColumnParent('Super3', 'sc1'), Column(cname, 'v-' + cname, 0), ConsistencyLevel.ONE)
cp = ColumnParent('Super3', 'sc1')
predicate = SlicePredicate(column_names=['col1', 'col3'])
range = KeyRange(start_token='55', end_token='55', count=100)
result = client.get_range_slices(cp, predicate, range, ConsistencyLevel.ONE)
assert len(result) == 5
assert result[0].columns[0].column.name == 'col1'
assert result[0].columns[1].column.name == 'col3'
def test_get_range_slice_super(self):
_set_keyspace('Keyspace2')
for key in ['key1', 'key2', 'key3', 'key4', 'key5']:
for cname in ['col1', 'col2', 'col3', 'col4', 'col5']:
client.insert(key, ColumnParent('Super3', 'sc1'), Column(cname, 'v-' + cname, 0), ConsistencyLevel.ONE)
cp = ColumnParent('Super3', 'sc1')
result = get_range_slice(client, cp, SlicePredicate(column_names=['col1', 'col3']), 'key2', 'key4', 5, ConsistencyLevel.ONE)
assert len(result) == 3
assert result[0].columns[0].column.name == 'col1'
assert result[0].columns[1].column.name == 'col3'
cp = ColumnParent('Super3')
result = get_range_slice(client, cp, SlicePredicate(column_names=['sc1']), 'key2', 'key4', 5, ConsistencyLevel.ONE)
assert len(result) == 3
assert list(set(row.columns[0].super_column.name for row in result))[0] == 'sc1'
def test_get_range_slice(self):
_set_keyspace('Keyspace1')
for key in ['key1', 'key2', 'key3', 'key4', 'key5']:
for cname in ['col1', 'col2', 'col3', 'col4', 'col5']:
client.insert(key, ColumnParent('Standard1'), Column(cname, 'v-' + cname, 0), ConsistencyLevel.ONE)
cp = ColumnParent('Standard1')
# test empty slice
result = get_range_slice(client, cp, SlicePredicate(column_names=['col1', 'col3']), 'key6', '', 1, ConsistencyLevel.ONE)
assert len(result) == 0
# test empty columns
result = get_range_slice(client, cp, SlicePredicate(column_names=['a']), 'key2', '', 1, ConsistencyLevel.ONE)
assert len(result) == 1
assert len(result[0].columns) == 0
# test column_names predicate
result = get_range_slice(client, cp, SlicePredicate(column_names=['col1', 'col3']), 'key2', 'key4', 5, ConsistencyLevel.ONE)
assert len(result) == 3, result
assert result[0].columns[0].column.name == 'col1'
assert result[0].columns[1].column.name == 'col3'
# row limiting via count.
result = get_range_slice(client, cp, SlicePredicate(column_names=['col1', 'col3']), 'key2', 'key4', 1, ConsistencyLevel.ONE)
assert len(result) == 1
# test column slice predicate
result = get_range_slice(client, cp, SlicePredicate(slice_range=SliceRange(start='col2', finish='col4', reversed=False, count=5)), 'key1', 'key2', 5, ConsistencyLevel.ONE)
assert len(result) == 2
assert result[0].key == 'key1'
assert result[1].key == 'key2'
assert len(result[0].columns) == 3
assert result[0].columns[0].column.name == 'col2'
assert result[0].columns[2].column.name == 'col4'
# col limiting via count
result = get_range_slice(client, cp, SlicePredicate(slice_range=SliceRange(start='col2', finish='col4', reversed=False, count=2)), 'key1', 'key2', 5, ConsistencyLevel.ONE)
assert len(result[0].columns) == 2
# and reversed
result = get_range_slice(client, cp, SlicePredicate(slice_range=SliceRange(start='col4', finish='col2', reversed=True, count=5)), 'key1', 'key2', 5, ConsistencyLevel.ONE)
assert result[0].columns[0].column.name == 'col4'
assert result[0].columns[2].column.name == 'col2'
# row limiting via count
result = get_range_slice(client, cp, SlicePredicate(slice_range=SliceRange(start='col2', finish='col4', reversed=False, count=5)), 'key1', 'key2', 1, ConsistencyLevel.ONE)
assert len(result) == 1
# removed data
client.remove('key1', ColumnPath('Standard1', column='col1'), 1, ConsistencyLevel.ONE)
result = get_range_slice(client, cp, SlicePredicate(slice_range=SliceRange('', '')), 'key1', 'key2', 5, ConsistencyLevel.ONE)
assert len(result) == 2, result
assert result[0].columns[0].column.name == 'col2', result[0].columns[0].column.name
assert result[1].columns[0].column.name == 'col1'
def test_wrapped_range_slices(self):
_set_keyspace('Keyspace1')
def copp_token(key):
# I cheated and generated this from Java
return {'a': '00530000000100000001',
'b': '00540000000100000001',
'c': '00550000000100000001',
'd': '00560000000100000001',
'e': '00580000000100000001'}[key]
for key in ['a', 'b', 'c', 'd', 'e']:
for cname in ['col1', 'col2', 'col3', 'col4', 'col5']:
client.insert(key, ColumnParent('Standard1'), Column(cname, 'v-' + cname, 0), ConsistencyLevel.ONE)
cp = ColumnParent('Standard1')
result = client.get_range_slices(cp, SlicePredicate(column_names=['col1', 'col3']), KeyRange(start_token=copp_token('e'), end_token=copp_token('e')), ConsistencyLevel.ONE)
assert [row.key for row in result] == ['a', 'b', 'c', 'd', 'e',], [row.key for row in result]
result = client.get_range_slices(cp, SlicePredicate(column_names=['col1', 'col3']), KeyRange(start_token=copp_token('c'), end_token=copp_token('c')), ConsistencyLevel.ONE)
assert [row.key for row in result] == ['d', 'e', 'a', 'b', 'c',], [row.key for row in result]
def test_get_slice_by_names(self):
_set_keyspace('Keyspace1')
_insert_range()
p = SlicePredicate(column_names=['c1', 'c2'])
result = client.get_slice('key1', ColumnParent('Standard1'), p, ConsistencyLevel.ONE)
assert len(result) == 2
assert result[0].column.name == 'c1'
assert result[1].column.name == 'c2'
_insert_super()
p = SlicePredicate(column_names=[_i64(4)])
result = client.get_slice('key1', ColumnParent('Super1', 'sc1'), p, ConsistencyLevel.ONE)
assert len(result) == 1
assert result[0].column.name == _i64(4)
def test_multiget_slice(self):
"""Insert multiple keys and retrieve them using the multiget_slice interface"""
_set_keyspace('Keyspace1')
# Generate a list of 10 keys and insert them
num_keys = 10
keys = ['key'+str(i) for i in range(1, num_keys+1)]
_insert_multi(keys)
# Retrieve all 10 key slices
rows = _big_multislice(keys, ColumnParent('Standard1'))
        keys1 = sorted(rows.keys())
        keys2 = sorted(keys)
        assert keys1 == keys2, (keys1, keys2)
columns = [ColumnOrSuperColumn(c) for c in _SIMPLE_COLUMNS]
# Validate if the returned rows have the keys requested and if the ColumnOrSuperColumn is what was inserted
for key in keys:
assert rows.has_key(key) == True
assert columns == rows[key]
def test_multi_count(self):
"""Insert multiple keys and count them using the multiget interface"""
_set_keyspace('Keyspace1')
        # Generate a list of 10 keys containing 1 to 10 columns and insert them
num_keys = 10
for i in range(1, num_keys+1):
key = 'key'+str(i)
for j in range(1, i+1):
client.insert(key, ColumnParent('Standard1'), Column('c'+str(j), 'value'+str(j), 0), ConsistencyLevel.ONE)
# Count columns in all 10 keys
keys = ['key'+str(i) for i in range(1, num_keys+1)]
p = SlicePredicate(slice_range=SliceRange('', '', False, 1000))
counts = client.multiget_count(keys, ColumnParent('Standard1'), p, ConsistencyLevel.ONE)
# Check the returned counts
for i in range(1, num_keys+1):
key = 'key'+str(i)
assert counts[key] == i
def test_batch_mutate_super_deletion(self):
_set_keyspace('Keyspace1')
_insert_super('test')
d = Deletion(1, predicate=SlicePredicate(column_names=['sc1']))
cfmap = {'Super1': [Mutation(deletion=d)]}
client.batch_mutate({'test': cfmap}, ConsistencyLevel.ONE)
_expect_missing(lambda: client.get('key1', ColumnPath('Super1', 'sc1'), ConsistencyLevel.ONE))
def test_super_reinsert(self):
_set_keyspace('Keyspace1')
for x in xrange(3):
client.insert('key1', ColumnParent('Super1', 'sc2'), Column(_i64(x), 'value', 1), ConsistencyLevel.ONE)
client.remove('key1', ColumnPath('Super1'), 2, ConsistencyLevel.ONE)
for x in xrange(3):
client.insert('key1', ColumnParent('Super1', 'sc2'), Column(_i64(x + 3), 'value', 3), ConsistencyLevel.ONE)
for n in xrange(1, 4):
p = SlicePredicate(slice_range=SliceRange('', '', False, n))
slice = client.get_slice('key1', ColumnParent('Super1', 'sc2'), p, ConsistencyLevel.ONE)
assert len(slice) == n, "expected %s results; found %s" % (n, slice)
def test_describe_keyspace(self):
kspaces = client.describe_keyspaces()
assert len(kspaces) == 3, kspaces # ['Keyspace2', 'Keyspace1', 'system']
sysks = client.describe_keyspace("system")
assert sysks in kspaces
ks1 = client.describe_keyspace("Keyspace1")
assert ks1.strategy_options['replication_factor'] == '1', ks1.strategy_options
for cf in ks1.cf_defs:
if cf.name == "Standard1":
cf0 = cf
                break
assert cf0.comparator_type == "org.apache.cassandra.db.marshal.BytesType"
def test_describe(self):
server_version = client.describe_version()
assert server_version == VERSION, (server_version, VERSION)
assert client.describe_cluster_name() == 'Test Cluster'
def test_describe_ring(self):
assert list(client.describe_ring('Keyspace1'))[0].endpoints == ['127.0.0.1']
def test_describe_token_map(self):
# test/conf/cassandra.yaml specifies org.apache.cassandra.dht.CollatingOrderPreservingPartitioner
# which uses BytesToken, so this just tests that the string representation of the token
# matches a regex pattern for BytesToken.toString().
ring = client.describe_token_map().items()
assert len(ring) == 1
token, node = ring[0]
        assert re.match(r"^Token\(bytes\[[0-9A-Fa-f]{32}\]\)", token)
assert node == '127.0.0.1'
def test_describe_partitioner(self):
# Make sure this just reads back the values from the config.
assert client.describe_partitioner() == "org.apache.cassandra.dht.CollatingOrderPreservingPartitioner"
def test_describe_snitch(self):
assert client.describe_snitch() == "org.apache.cassandra.locator.SimpleSnitch"
def test_invalid_ks_names(self):
def invalid_keyspace():
client.system_add_keyspace(KsDef('in-valid', 'org.apache.cassandra.locator.SimpleStrategy', {'replication_factor':'1'}, cf_defs=[]))
_expect_exception(invalid_keyspace, InvalidRequestException)
def test_invalid_strategy_class(self):
def add_invalid_keyspace():
client.system_add_keyspace(KsDef('ValidKs', 'InvalidStrategyClass', {}, cf_defs=[]))
exc = _expect_exception(add_invalid_keyspace, InvalidRequestException)
s = str(exc)
assert s.find("InvalidStrategyClass") > -1, s
assert s.find("Unable to find replication strategy") > -1, s
def update_invalid_keyspace():
client.system_add_keyspace(KsDef('ValidKsForUpdate', 'org.apache.cassandra.locator.SimpleStrategy', {'replication_factor':'1'}, cf_defs=[]))
client.system_update_keyspace(KsDef('ValidKsForUpdate', 'InvalidStrategyClass', {}, cf_defs=[]))
exc = _expect_exception(update_invalid_keyspace, InvalidRequestException)
s = str(exc)
assert s.find("InvalidStrategyClass") > -1, s
assert s.find("Unable to find replication strategy") > -1, s
def test_invalid_cf_names(self):
def invalid_cf():
_set_keyspace('Keyspace1')
newcf = CfDef('Keyspace1', 'in-valid')
client.system_add_column_family(newcf)
_expect_exception(invalid_cf, InvalidRequestException)
def invalid_cf_inside_new_ks():
cf = CfDef('ValidKsName_invalid_cf', 'in-valid')
_set_keyspace('system')
client.system_add_keyspace(KsDef('ValidKsName_invalid_cf', 'org.apache.cassandra.locator.SimpleStrategy', {'replication_factor': '1'}, cf_defs=[cf]))
_expect_exception(invalid_cf_inside_new_ks, InvalidRequestException)
def test_system_cf_recreate(self):
"ensures that keyspaces and column familes can be dropped and recreated in short order"
for x in range(2):
keyspace = 'test_cf_recreate'
cf_name = 'recreate_cf'
# create
newcf = CfDef(keyspace, cf_name)
newks = KsDef(keyspace, 'org.apache.cassandra.locator.SimpleStrategy', {'replication_factor':'1'}, cf_defs=[newcf])
client.system_add_keyspace(newks)
_set_keyspace(keyspace)
# insert
client.insert('key0', ColumnParent(cf_name), Column('colA', 'colA-value', 0), ConsistencyLevel.ONE)
col1 = client.get_slice('key0', ColumnParent(cf_name), SlicePredicate(slice_range=SliceRange('', '', False, 100)), ConsistencyLevel.ONE)[0].column
assert col1.name == 'colA' and col1.value == 'colA-value'
# drop
client.system_drop_column_family(cf_name)
# recreate
client.system_add_column_family(newcf)
# query
cosc_list = client.get_slice('key0', ColumnParent(cf_name), SlicePredicate(slice_range=SliceRange('', '', False, 100)), ConsistencyLevel.ONE)
# this was failing prior to CASSANDRA-1477.
            assert len(cosc_list) == 0, 'cosc length test failed'
client.system_drop_keyspace(keyspace)
def test_system_keyspace_operations(self):
# create. note large RF, this is OK
keyspace = KsDef('CreateKeyspace',
'org.apache.cassandra.locator.SimpleStrategy',
{'replication_factor': '10'},
cf_defs=[CfDef('CreateKeyspace', 'CreateKsCf')])
client.system_add_keyspace(keyspace)
newks = client.describe_keyspace('CreateKeyspace')
assert 'CreateKsCf' in [x.name for x in newks.cf_defs]
_set_keyspace('CreateKeyspace')
# modify valid
modified_keyspace = KsDef('CreateKeyspace',
'org.apache.cassandra.locator.OldNetworkTopologyStrategy',
{'replication_factor': '1'},
cf_defs=[])
client.system_update_keyspace(modified_keyspace)
modks = client.describe_keyspace('CreateKeyspace')
assert modks.strategy_class == modified_keyspace.strategy_class
assert modks.strategy_options == modified_keyspace.strategy_options
# drop
client.system_drop_keyspace('CreateKeyspace')
def get_second_ks():
client.describe_keyspace('CreateKeyspace')
_expect_exception(get_second_ks, NotFoundException)
def test_create_then_drop_ks(self):
keyspace = KsDef('AddThenDrop',
strategy_class='org.apache.cassandra.locator.SimpleStrategy',
strategy_options={'replication_factor':'1'},
cf_defs=[])
def test_existence():
client.describe_keyspace(keyspace.name)
_expect_exception(test_existence, NotFoundException)
client.set_keyspace('system')
client.system_add_keyspace(keyspace)
test_existence()
client.system_drop_keyspace(keyspace.name)
def test_column_validators(self):
# columndef validation for regular CF
ks = 'Keyspace1'
_set_keyspace(ks)
cd = ColumnDef('col', 'LongType', None, None)
cf = CfDef('Keyspace1', 'ValidatorColumnFamily', column_metadata=[cd])
client.system_add_column_family(cf)
ks_def = client.describe_keyspace(ks)
assert 'ValidatorColumnFamily' in [x.name for x in ks_def.cf_defs]
cp = ColumnParent('ValidatorColumnFamily')
col0 = Column('col', _i64(42), 0)
col1 = Column('col', "ceci n'est pas 64bit", 0)
client.insert('key0', cp, col0, ConsistencyLevel.ONE)
e = _expect_exception(lambda: client.insert('key1', cp, col1, ConsistencyLevel.ONE), InvalidRequestException)
assert e.why.find("failed validation") >= 0
# columndef validation for super CF
scf = CfDef('Keyspace1', 'ValidatorSuperColumnFamily', column_type='Super', column_metadata=[cd])
client.system_add_column_family(scf)
ks_def = client.describe_keyspace(ks)
assert 'ValidatorSuperColumnFamily' in [x.name for x in ks_def.cf_defs]
scp = ColumnParent('ValidatorSuperColumnFamily','sc1')
client.insert('key0', scp, col0, ConsistencyLevel.ONE)
e = _expect_exception(lambda: client.insert('key1', scp, col1, ConsistencyLevel.ONE), InvalidRequestException)
assert e.why.find("failed validation") >= 0
# columndef and cfdef default validation
cf = CfDef('Keyspace1', 'DefaultValidatorColumnFamily', column_metadata=[cd], default_validation_class='UTF8Type')
client.system_add_column_family(cf)
ks_def = client.describe_keyspace(ks)
assert 'DefaultValidatorColumnFamily' in [x.name for x in ks_def.cf_defs]
dcp = ColumnParent('DefaultValidatorColumnFamily')
# inserting a longtype into column 'col' is valid at the columndef level
client.insert('key0', dcp, col0, ConsistencyLevel.ONE)
# inserting a UTF8type into column 'col' fails at the columndef level
e = _expect_exception(lambda: client.insert('key1', dcp, col1, ConsistencyLevel.ONE), InvalidRequestException)
assert e.why.find("failed validation") >= 0
# insert a longtype into column 'fcol' should fail at the cfdef level
col2 = Column('fcol', _i64(4224), 0)
e = _expect_exception(lambda: client.insert('key1', dcp, col2, ConsistencyLevel.ONE), InvalidRequestException)
assert e.why.find("failed validation") >= 0
# insert a UTF8type into column 'fcol' is valid at the cfdef level
col3 = Column('fcol', "Stringin' it up in the Stringtel Stringifornia", 0)
client.insert('key0', dcp, col3, ConsistencyLevel.ONE)
def test_system_column_family_operations(self):
_set_keyspace('Keyspace1')
# create
cd = ColumnDef('ValidationColumn', 'BytesType', None, None)
newcf = CfDef('Keyspace1', 'NewColumnFamily', column_metadata=[cd])
client.system_add_column_family(newcf)
ks1 = client.describe_keyspace('Keyspace1')
assert 'NewColumnFamily' in [x.name for x in ks1.cf_defs]
cfid = [x.id for x in ks1.cf_defs if x.name=='NewColumnFamily'][0]
assert cfid > 1000
# modify invalid
modified_cf = CfDef('Keyspace1', 'NewColumnFamily', column_metadata=[cd])
modified_cf.id = cfid
def fail_invalid_field():
modified_cf.comparator_type = 'LongType'
client.system_update_column_family(modified_cf)
_expect_exception(fail_invalid_field, InvalidRequestException)
# modify valid
modified_cf.comparator_type = 'BytesType' # revert back to old value.
modified_cf.gc_grace_seconds = 1
client.system_update_column_family(modified_cf)
ks1 = client.describe_keyspace('Keyspace1')
server_cf = [x for x in ks1.cf_defs if x.name=='NewColumnFamily'][0]
assert server_cf
assert server_cf.gc_grace_seconds == 1
# drop
client.system_drop_column_family('NewColumnFamily')
ks1 = client.describe_keyspace('Keyspace1')
assert 'NewColumnFamily' not in [x.name for x in ks1.cf_defs]
assert 'Standard1' in [x.name for x in ks1.cf_defs]
# Make a LongType CF and add a validator
newcf = CfDef('Keyspace1', 'NewLongColumnFamily', comparator_type='LongType')
client.system_add_column_family(newcf)
three = _i64(3)
cd = ColumnDef(three, 'LongType', None, None)
ks1 = client.describe_keyspace('Keyspace1')
modified_cf = [x for x in ks1.cf_defs if x.name=='NewLongColumnFamily'][0]
modified_cf.column_metadata = [cd]
client.system_update_column_family(modified_cf)
ks1 = client.describe_keyspace('Keyspace1')
server_cf = [x for x in ks1.cf_defs if x.name=='NewLongColumnFamily'][0]
assert server_cf.column_metadata[0].name == _i64(3), server_cf.column_metadata
def test_dynamic_indexes_creation_deletion(self):
_set_keyspace('Keyspace1')
cfdef = CfDef('Keyspace1', 'BlankCF')
client.system_add_column_family(cfdef)
ks1 = client.describe_keyspace('Keyspace1')
cfid = [x.id for x in ks1.cf_defs if x.name=='BlankCF'][0]
modified_cd = ColumnDef('birthdate', 'BytesType', IndexType.KEYS, None)
modified_cf = CfDef('Keyspace1', 'BlankCF', column_metadata=[modified_cd])
modified_cf.id = cfid
client.system_update_column_family(modified_cf)
# Add a second indexed CF ...
birthdate_coldef = ColumnDef('birthdate', 'BytesType', IndexType.KEYS, None)
age_coldef = ColumnDef('age', 'BytesType', IndexType.KEYS, 'age_index')
cfdef = CfDef('Keyspace1', 'BlankCF2', column_metadata=[birthdate_coldef, age_coldef])
client.system_add_column_family(cfdef)
# ... and update it to have a third index
ks1 = client.describe_keyspace('Keyspace1')
cfdef = [x for x in ks1.cf_defs if x.name=='BlankCF2'][0]
name_coldef = ColumnDef('name', 'BytesType', IndexType.KEYS, 'name_index')
cfdef.column_metadata.append(name_coldef)
client.system_update_column_family(cfdef)
# Now drop the indexes
ks1 = client.describe_keyspace('Keyspace1')
cfdef = [x for x in ks1.cf_defs if x.name=='BlankCF2'][0]
birthdate_coldef = ColumnDef('birthdate', 'BytesType', None, None)
age_coldef = ColumnDef('age', 'BytesType', None, None)
name_coldef = ColumnDef('name', 'BytesType', None, None)
cfdef.column_metadata = [birthdate_coldef, age_coldef, name_coldef]
client.system_update_column_family(cfdef)
ks1 = client.describe_keyspace('Keyspace1')
cfdef = [x for x in ks1.cf_defs if x.name=='BlankCF'][0]
birthdate_coldef = ColumnDef('birthdate', 'BytesType', None, None)
cfdef.column_metadata = [birthdate_coldef]
client.system_update_column_family(cfdef)
client.system_drop_column_family('BlankCF')
client.system_drop_column_family('BlankCF2')
def test_dynamic_indexes_with_system_update_cf(self):
_set_keyspace('Keyspace1')
cd = ColumnDef('birthdate', 'BytesType', None, None)
newcf = CfDef('Keyspace1', 'ToBeIndexed', default_validation_class='LongType', column_metadata=[cd])
client.system_add_column_family(newcf)
client.insert('key1', ColumnParent('ToBeIndexed'), Column('birthdate', _i64(1), 0), ConsistencyLevel.ONE)
client.insert('key2', ColumnParent('ToBeIndexed'), Column('birthdate', _i64(2), 0), ConsistencyLevel.ONE)
client.insert('key2', ColumnParent('ToBeIndexed'), Column('b', _i64(2), 0), ConsistencyLevel.ONE)
client.insert('key3', ColumnParent('ToBeIndexed'), Column('birthdate', _i64(3), 0), ConsistencyLevel.ONE)
client.insert('key3', ColumnParent('ToBeIndexed'), Column('b', _i64(3), 0), ConsistencyLevel.ONE)
# First without index
cp = ColumnParent('ToBeIndexed')
sp = SlicePredicate(slice_range=SliceRange('', ''))
key_range = KeyRange('', '', None, None, [IndexExpression('birthdate', IndexOperator.EQ, _i64(1))], 100)
result = client.get_range_slices(cp, sp, key_range, ConsistencyLevel.ONE)
assert len(result) == 1, result
assert result[0].key == 'key1'
assert len(result[0].columns) == 1, result[0].columns
# add an index on 'birthdate'
ks1 = client.describe_keyspace('Keyspace1')
cfid = [x.id for x in ks1.cf_defs if x.name=='ToBeIndexed'][0]
modified_cd = ColumnDef('birthdate', 'BytesType', IndexType.KEYS, 'bd_index')
modified_cf = CfDef('Keyspace1', 'ToBeIndexed', column_metadata=[modified_cd])
modified_cf.id = cfid
client.system_update_column_family(modified_cf)
ks1 = client.describe_keyspace('Keyspace1')
server_cf = [x for x in ks1.cf_defs if x.name=='ToBeIndexed'][0]
assert server_cf
assert server_cf.column_metadata[0].index_type == modified_cd.index_type
assert server_cf.column_metadata[0].index_name == modified_cd.index_name
# sleep a bit to give time for the index to build.
time.sleep(0.5)
# repeat query on one index expression
result = client.get_range_slices(cp, sp, key_range, ConsistencyLevel.ONE)
assert len(result) == 1, result
assert result[0].key == 'key1'
assert len(result[0].columns) == 1, result[0].columns
def test_system_super_column_family_operations(self):
_set_keyspace('Keyspace1')
# create
cd = ColumnDef('ValidationColumn', 'BytesType', None, None)
newcf = CfDef('Keyspace1', 'NewSuperColumnFamily', 'Super', column_metadata=[cd])
client.system_add_column_family(newcf)
ks1 = client.describe_keyspace('Keyspace1')
assert 'NewSuperColumnFamily' in [x.name for x in ks1.cf_defs]
# drop
client.system_drop_column_family('NewSuperColumnFamily')
ks1 = client.describe_keyspace('Keyspace1')
assert 'NewSuperColumnFamily' not in [x.name for x in ks1.cf_defs]
assert 'Standard1' in [x.name for x in ks1.cf_defs]
def test_insert_ttl(self):
""" Test simple insertion of a column with ttl """
_set_keyspace('Keyspace1')
column = Column('cttl1', 'value1', 0, 5)
client.insert('key1', ColumnParent('Standard1'), column, ConsistencyLevel.ONE)
assert client.get('key1', ColumnPath('Standard1', column='cttl1'), ConsistencyLevel.ONE).column == column
def test_simple_expiration(self):
""" Test that column ttled do expires """
_set_keyspace('Keyspace1')
column = Column('cttl3', 'value1', 0, 2)
client.insert('key1', ColumnParent('Standard1'), column, ConsistencyLevel.ONE)
time.sleep(1)
c = client.get('key1', ColumnPath('Standard1', column='cttl3'), ConsistencyLevel.ONE).column
assert c == column
assert client.get('key1', ColumnPath('Standard1', column='cttl3'), ConsistencyLevel.ONE).column == column
time.sleep(2)
_expect_missing(lambda: client.get('key1', ColumnPath('Standard1', column='cttl3'), ConsistencyLevel.ONE))
def test_simple_expiration_batch_mutate(self):
""" Test that column ttled do expires using batch_mutate """
_set_keyspace('Keyspace1')
column = Column('cttl4', 'value1', 0, 2)
cfmap = {'Standard1': [Mutation(ColumnOrSuperColumn(column))]}
client.batch_mutate({'key1': cfmap}, ConsistencyLevel.ONE)
time.sleep(1)
c = client.get('key1', ColumnPath('Standard1', column='cttl4'), ConsistencyLevel.ONE).column
assert c == column
assert client.get('key1', ColumnPath('Standard1', column='cttl4'), ConsistencyLevel.ONE).column == column
time.sleep(2)
        _expect_missing(lambda: client.get('key1', ColumnPath('Standard1', column='cttl4'), ConsistencyLevel.ONE))
def test_update_expiring(self):
""" Test that updating a column with ttl override the ttl """
_set_keyspace('Keyspace1')
column1 = Column('cttl4', 'value1', 0, 1)
client.insert('key1', ColumnParent('Standard1'), column1, ConsistencyLevel.ONE)
column2 = Column('cttl4', 'value1', 1)
client.insert('key1', ColumnParent('Standard1'), column2, ConsistencyLevel.ONE)
time.sleep(1.5)
assert client.get('key1', ColumnPath('Standard1', column='cttl4'), ConsistencyLevel.ONE).column == column2
def test_remove_expiring(self):
""" Test removing a column with ttl """
_set_keyspace('Keyspace1')
column = Column('cttl5', 'value1', 0, 10)
client.insert('key1', ColumnParent('Standard1'), column, ConsistencyLevel.ONE)
client.remove('key1', ColumnPath('Standard1', column='cttl5'), 1, ConsistencyLevel.ONE)
        _expect_missing(lambda: client.get('key1', ColumnPath('Standard1', column='cttl5'), ConsistencyLevel.ONE))
def test_describe_ring_on_invalid_keyspace(self):
def req():
client.describe_ring('system')
_expect_exception(req, InvalidRequestException)
def test_incr_decr_standard_add(self):
_set_keyspace('Keyspace1')
d1 = 12
d2 = -21
d3 = 35
# insert positive and negative values and check the counts
client.add('key1', ColumnParent(column_family='Counter1'), CounterColumn('c1', d1), ConsistencyLevel.ONE)
time.sleep(0.1)
rv1 = client.get('key1', ColumnPath(column_family='Counter1', column='c1'), ConsistencyLevel.ONE)
assert rv1.counter_column.value == d1
client.add('key1', ColumnParent(column_family='Counter1'), CounterColumn('c1', d2), ConsistencyLevel.ONE)
time.sleep(0.1)
rv2 = client.get('key1', ColumnPath(column_family='Counter1', column='c1'), ConsistencyLevel.ONE)
assert rv2.counter_column.value == (d1+d2)
client.add('key1', ColumnParent(column_family='Counter1'), CounterColumn('c1', d3), ConsistencyLevel.ONE)
time.sleep(0.1)
rv3 = client.get('key1', ColumnPath(column_family='Counter1', column='c1'), ConsistencyLevel.ONE)
assert rv3.counter_column.value == (d1+d2+d3)
def test_incr_decr_super_add(self):
_set_keyspace('Keyspace1')
d1 = -234
d2 = 52345
d3 = 3123
client.add('key1', ColumnParent(column_family='SuperCounter1', super_column='sc1'), CounterColumn('c1', d1), ConsistencyLevel.ONE)
client.add('key1', ColumnParent(column_family='SuperCounter1', super_column='sc1'), CounterColumn('c2', d2), ConsistencyLevel.ONE)
time.sleep(0.1)
rv1 = client.get('key1', ColumnPath(column_family='SuperCounter1', super_column='sc1'), ConsistencyLevel.ONE)
assert rv1.counter_super_column.columns[0].value == d1
assert rv1.counter_super_column.columns[1].value == d2
client.add('key1', ColumnParent(column_family='SuperCounter1', super_column='sc1'), CounterColumn('c1', d2), ConsistencyLevel.ONE)
time.sleep(0.1)
rv2 = client.get('key1', ColumnPath('SuperCounter1', 'sc1', 'c1'), ConsistencyLevel.ONE)
assert rv2.counter_column.value == (d1+d2)
client.add('key1', ColumnParent(column_family='SuperCounter1', super_column='sc1'), CounterColumn('c1', d3), ConsistencyLevel.ONE)
time.sleep(0.1)
rv3 = client.get('key1', ColumnPath(column_family='SuperCounter1', super_column='sc1', column='c1'), ConsistencyLevel.ONE)
assert rv3.counter_column.value == (d1+d2+d3)
def test_incr_standard_remove(self):
_set_keyspace('Keyspace1')
d1 = 124
# insert value and check it exists
client.add('key1', ColumnParent(column_family='Counter1'), CounterColumn('c1', d1), ConsistencyLevel.ONE)
time.sleep(5)
rv1 = client.get('key1', ColumnPath(column_family='Counter1', column='c1'), ConsistencyLevel.ONE)
assert rv1.counter_column.value == d1
# remove the previous column and check that it is gone
client.remove_counter('key1', ColumnPath(column_family='Counter1', column='c1'), ConsistencyLevel.ONE)
time.sleep(5)
_assert_no_columnpath('key1', ColumnPath(column_family='Counter1', column='c1'))
# insert again and this time delete the whole row, check that it is gone
client.add('key1', ColumnParent(column_family='Counter1'), CounterColumn('c1', d1), ConsistencyLevel.ONE)
time.sleep(5)
rv2 = client.get('key1', ColumnPath(column_family='Counter1', column='c1'), ConsistencyLevel.ONE)
assert rv2.counter_column.value == d1
client.remove_counter('key1', ColumnPath(column_family='Counter1'), ConsistencyLevel.ONE)
time.sleep(5)
_assert_no_columnpath('key1', ColumnPath(column_family='Counter1', column='c1'))
def test_incr_super_remove(self):
_set_keyspace('Keyspace1')
d1 = 52345
# insert value and check it exists
client.add('key1', ColumnParent(column_family='SuperCounter1', super_column='sc1'), CounterColumn('c1', d1), ConsistencyLevel.ONE)
time.sleep(5)
rv1 = client.get('key1', ColumnPath(column_family='SuperCounter1', super_column='sc1', column='c1'), ConsistencyLevel.ONE)
assert rv1.counter_column.value == d1
# remove the previous column and check that it is gone
client.remove_counter('key1', ColumnPath(column_family='SuperCounter1', super_column='sc1', column='c1'), ConsistencyLevel.ONE)
time.sleep(5)
_assert_no_columnpath('key1', ColumnPath(column_family='SuperCounter1', super_column='sc1', column='c1'))
# insert again and this time delete the whole row, check that it is gone
client.add('key1', ColumnParent(column_family='SuperCounter1', super_column='sc1'), CounterColumn('c1', d1), ConsistencyLevel.ONE)
time.sleep(5)
rv2 = client.get('key1', ColumnPath(column_family='SuperCounter1', super_column='sc1', column='c1'), ConsistencyLevel.ONE)
assert rv2.counter_column.value == d1
client.remove_counter('key1', ColumnPath(column_family='SuperCounter1', super_column='sc1'), ConsistencyLevel.ONE)
time.sleep(5)
_assert_no_columnpath('key1', ColumnPath(column_family='SuperCounter1', super_column='sc1', column='c1'))
def test_incr_decr_standard_remove(self):
_set_keyspace('Keyspace1')
d1 = 124
# insert value and check it exists
client.add('key1', ColumnParent(column_family='Counter1'), CounterColumn('c1', d1), ConsistencyLevel.ONE)
time.sleep(5)
rv1 = client.get('key1', ColumnPath(column_family='Counter1', column='c1'), ConsistencyLevel.ONE)
assert rv1.counter_column.value == d1
# remove the previous column and check that it is gone
client.remove_counter('key1', ColumnPath(column_family='Counter1', column='c1'), ConsistencyLevel.ONE)
time.sleep(5)
_assert_no_columnpath('key1', ColumnPath(column_family='Counter1', column='c1'))
# insert again and this time delete the whole row, check that it is gone
client.add('key1', ColumnParent(column_family='Counter1'), CounterColumn('c1', d1), ConsistencyLevel.ONE)
time.sleep(5)
rv2 = client.get('key1', ColumnPath(column_family='Counter1', column='c1'), ConsistencyLevel.ONE)
assert rv2.counter_column.value == d1
client.remove_counter('key1', ColumnPath(column_family='Counter1'), ConsistencyLevel.ONE)
time.sleep(5)
_assert_no_columnpath('key1', ColumnPath(column_family='Counter1', column='c1'))
def test_incr_decr_super_remove(self):
_set_keyspace('Keyspace1')
d1 = 52345
# insert value and check it exists
client.add('key1', ColumnParent(column_family='SuperCounter1', super_column='sc1'), CounterColumn('c1', d1), ConsistencyLevel.ONE)
time.sleep(5)
rv1 = client.get('key1', ColumnPath(column_family='SuperCounter1', super_column='sc1', column='c1'), ConsistencyLevel.ONE)
assert rv1.counter_column.value == d1
# remove the previous column and check that it is gone
client.remove_counter('key1', ColumnPath(column_family='SuperCounter1', super_column='sc1', column='c1'), ConsistencyLevel.ONE)
time.sleep(5)
_assert_no_columnpath('key1', ColumnPath(column_family='SuperCounter1', super_column='sc1', column='c1'))
# insert again and this time delete the whole row, check that it is gone
client.add('key1', ColumnParent(column_family='SuperCounter1', super_column='sc1'), CounterColumn('c1', d1), ConsistencyLevel.ONE)
time.sleep(5)
rv2 = client.get('key1', ColumnPath(column_family='SuperCounter1', super_column='sc1', column='c1'), ConsistencyLevel.ONE)
assert rv2.counter_column.value == d1
client.remove_counter('key1', ColumnPath(column_family='SuperCounter1', super_column='sc1'), ConsistencyLevel.ONE)
time.sleep(5)
_assert_no_columnpath('key1', ColumnPath(column_family='SuperCounter1', super_column='sc1', column='c1'))
def test_incr_decr_standard_batch_add(self):
_set_keyspace('Keyspace1')
d1 = 12
d2 = -21
update_map = {'key1': {'Counter1': [
Mutation(column_or_supercolumn=ColumnOrSuperColumn(counter_column=CounterColumn('c1', d1))),
Mutation(column_or_supercolumn=ColumnOrSuperColumn(counter_column=CounterColumn('c1', d2))),
]}}
# insert positive and negative values and check the counts
client.batch_mutate(update_map, ConsistencyLevel.ONE)
time.sleep(0.1)
rv1 = client.get('key1', ColumnPath(column_family='Counter1', column='c1'), ConsistencyLevel.ONE)
assert rv1.counter_column.value == d1+d2
def test_incr_decr_standard_batch_remove(self):
_set_keyspace('Keyspace1')
d1 = 12
d2 = -21
# insert positive and negative values and check the counts
update_map = {'key1': {'Counter1': [
Mutation(column_or_supercolumn=ColumnOrSuperColumn(counter_column=CounterColumn('c1', d1))),
Mutation(column_or_supercolumn=ColumnOrSuperColumn(counter_column=CounterColumn('c1', d2))),
]}}
client.batch_mutate(update_map, ConsistencyLevel.ONE)
time.sleep(5)
rv1 = client.get('key1', ColumnPath(column_family='Counter1', column='c1'), ConsistencyLevel.ONE)
assert rv1.counter_column.value == d1+d2
# remove the previous column and check that it is gone
update_map = {'key1': {'Counter1': [
Mutation(deletion=Deletion(predicate=SlicePredicate(column_names=['c1']))),
]}}
client.batch_mutate(update_map, ConsistencyLevel.ONE)
time.sleep(5)
_assert_no_columnpath('key1', ColumnPath(column_family='Counter1', column='c1'))
# insert again and this time delete the whole row, check that it is gone
update_map = {'key1': {'Counter1': [
Mutation(column_or_supercolumn=ColumnOrSuperColumn(counter_column=CounterColumn('c1', d1))),
Mutation(column_or_supercolumn=ColumnOrSuperColumn(counter_column=CounterColumn('c1', d2))),
]}}
client.batch_mutate(update_map, ConsistencyLevel.ONE)
time.sleep(5)
rv2 = client.get('key1', ColumnPath(column_family='Counter1', column='c1'), ConsistencyLevel.ONE)
assert rv2.counter_column.value == d1+d2
update_map = {'key1': {'Counter1': [
Mutation(deletion=Deletion()),
]}}
client.batch_mutate(update_map, ConsistencyLevel.ONE)
time.sleep(5)
_assert_no_columnpath('key1', ColumnPath(column_family='Counter1', column='c1'))
def test_incr_decr_standard_slice(self):
_set_keyspace('Keyspace1')
d1 = 12
d2 = -21
client.add('key1', ColumnParent(column_family='Counter1'), CounterColumn('c1', d1), ConsistencyLevel.ONE)
client.add('key1', ColumnParent(column_family='Counter1'), CounterColumn('c2', d1), ConsistencyLevel.ONE)
client.add('key1', ColumnParent(column_family='Counter1'), CounterColumn('c3', d1), ConsistencyLevel.ONE)
client.add('key1', ColumnParent(column_family='Counter1'), CounterColumn('c3', d2), ConsistencyLevel.ONE)
client.add('key1', ColumnParent(column_family='Counter1'), CounterColumn('c4', d1), ConsistencyLevel.ONE)
client.add('key1', ColumnParent(column_family='Counter1'), CounterColumn('c5', d1), ConsistencyLevel.ONE)
time.sleep(0.1)
# insert positive and negative values and check the counts
counters = client.get_slice('key1', ColumnParent('Counter1'), SlicePredicate(['c3', 'c4']), ConsistencyLevel.ONE)
assert counters[0].counter_column.value == d1+d2
assert counters[1].counter_column.value == d1
def test_incr_decr_standard_muliget_slice(self):
_set_keyspace('Keyspace1')
d1 = 12
d2 = -21
client.add('key1', ColumnParent(column_family='Counter1'), CounterColumn('c2', d1), ConsistencyLevel.ONE)
client.add('key1', ColumnParent(column_family='Counter1'), CounterColumn('c3', d1), ConsistencyLevel.ONE)
client.add('key1', ColumnParent(column_family='Counter1'), CounterColumn('c3', d2), ConsistencyLevel.ONE)
client.add('key1', ColumnParent(column_family='Counter1'), CounterColumn('c4', d1), ConsistencyLevel.ONE)
client.add('key1', ColumnParent(column_family='Counter1'), CounterColumn('c5', d1), ConsistencyLevel.ONE)
client.add('key2', ColumnParent(column_family='Counter1'), CounterColumn('c2', d1), ConsistencyLevel.ONE)
client.add('key2', ColumnParent(column_family='Counter1'), CounterColumn('c3', d1), ConsistencyLevel.ONE)
client.add('key2', ColumnParent(column_family='Counter1'), CounterColumn('c3', d2), ConsistencyLevel.ONE)
client.add('key2', ColumnParent(column_family='Counter1'), CounterColumn('c4', d1), ConsistencyLevel.ONE)
client.add('key2', ColumnParent(column_family='Counter1'), CounterColumn('c5', d1), ConsistencyLevel.ONE)
time.sleep(0.1)
# insert positive and negative values and check the counts
counters = client.multiget_slice(['key1', 'key2'], ColumnParent('Counter1'), SlicePredicate(['c3', 'c4']), ConsistencyLevel.ONE)
assert counters['key1'][0].counter_column.value == d1+d2
assert counters['key1'][1].counter_column.value == d1
assert counters['key2'][0].counter_column.value == d1+d2
assert counters['key2'][1].counter_column.value == d1
def test_counter_get_slice_range(self):
_set_keyspace('Keyspace1')
_insert_counter_range()
_verify_counter_range()
def test_counter_get_slice_super_range(self):
_set_keyspace('Keyspace1')
_insert_counter_super_range()
_verify_counter_super_range()
def test_index_scan(self):
_set_keyspace('Keyspace1')
client.insert('key1', ColumnParent('Indexed1'), Column('birthdate', _i64(1), 0), ConsistencyLevel.ONE)
client.insert('key2', ColumnParent('Indexed1'), Column('birthdate', _i64(2), 0), ConsistencyLevel.ONE)
client.insert('key2', ColumnParent('Indexed1'), Column('b', _i64(2), 0), ConsistencyLevel.ONE)
client.insert('key3', ColumnParent('Indexed1'), Column('birthdate', _i64(3), 0), ConsistencyLevel.ONE)
client.insert('key3', ColumnParent('Indexed1'), Column('b', _i64(3), 0), ConsistencyLevel.ONE)
# simple query on one index expression
cp = ColumnParent('Indexed1')
sp = SlicePredicate(slice_range=SliceRange('', ''))
key_range = KeyRange('', '', None, None, [IndexExpression('birthdate', IndexOperator.EQ, _i64(1))], 100)
result = client.get_range_slices(cp, sp, key_range, ConsistencyLevel.ONE)
assert len(result) == 1, result
assert result[0].key == 'key1'
assert len(result[0].columns) == 1, result[0].columns
# without index
key_range = KeyRange('', '', None, None, [IndexExpression('b', IndexOperator.EQ, _i64(1))], 100)
result = client.get_range_slices(cp, sp, key_range, ConsistencyLevel.ONE)
assert len(result) == 0, result
# but unindexed expression added to indexed one is ok
key_range = KeyRange('', '', None, None, [IndexExpression('b', IndexOperator.EQ, _i64(3)), IndexExpression('birthdate', IndexOperator.EQ, _i64(3))], 100)
result = client.get_range_slices(cp, sp, key_range, ConsistencyLevel.ONE)
assert len(result) == 1, result
assert result[0].key == 'key3'
assert len(result[0].columns) == 2, result[0].columns
def test_index_scan_uuid_names(self):
_set_keyspace('Keyspace1')
sp = SlicePredicate(slice_range=SliceRange('', ''))
cp = ColumnParent('Indexed3') # timeuuid name, utf8 values
u = uuid.UUID('00000000-0000-1000-0000-000000000000').bytes
u2 = uuid.UUID('00000000-0000-1000-0000-000000000001').bytes
client.insert('key1', ColumnParent('Indexed3'), Column(u, 'a', 0), ConsistencyLevel.ONE)
client.insert('key1', ColumnParent('Indexed3'), Column(u2, 'b', 0), ConsistencyLevel.ONE)
# name comparator + data validator of incompatible types -- see CASSANDRA-2347
key_range = KeyRange('', '', None, None, [IndexExpression(u, IndexOperator.EQ, 'a'), IndexExpression(u2, IndexOperator.EQ, 'b')], 100)
result = client.get_range_slices(cp, sp, key_range, ConsistencyLevel.ONE)
assert len(result) == 1, result
cp = ColumnParent('Indexed2') # timeuuid name, long values
# name must be valid (TimeUUID)
key_range = KeyRange('', '', None, None, [IndexExpression('foo', IndexOperator.EQ, uuid.UUID('00000000-0000-1000-0000-000000000000').bytes)], 100)
_expect_exception(lambda: client.get_range_slices(cp, sp, key_range, ConsistencyLevel.ONE), InvalidRequestException)
# value must be valid (TimeUUID)
key_range = KeyRange('', '', None, None, [IndexExpression(uuid.UUID('00000000-0000-1000-0000-000000000000').bytes, IndexOperator.EQ, "foo")], 100)
_expect_exception(lambda: client.get_range_slices(cp, sp, key_range, ConsistencyLevel.ONE), InvalidRequestException)
def test_index_scan_expiring(self):
""" Test that column ttled expires from KEYS index"""
_set_keyspace('Keyspace1')
client.insert('key1', ColumnParent('Indexed1'), Column('birthdate', _i64(1), 0, 1), ConsistencyLevel.ONE)
cp = ColumnParent('Indexed1')
sp = SlicePredicate(slice_range=SliceRange('', ''))
key_range = KeyRange('', '', None, None, [IndexExpression('birthdate', IndexOperator.EQ, _i64(1))], 100)
# query before expiration
result = client.get_range_slices(cp, sp, key_range, ConsistencyLevel.ONE)
assert len(result) == 1, result
# wait for expiration and requery
time.sleep(2)
result = client.get_range_slices(cp, sp, key_range, ConsistencyLevel.ONE)
assert len(result) == 0, result
def test_column_not_found_quorum(self):
_set_keyspace('Keyspace1')
key = 'doesntexist'
column_path = ColumnPath(column_family="Standard1", column="idontexist")
try:
client.get(key, column_path, ConsistencyLevel.QUORUM)
assert False, ('columnpath %s existed in %s when it should not' % (column_path, key))
except NotFoundException:
assert True, 'column did not exist'
def test_get_range_slice_after_deletion(self):
_set_keyspace('Keyspace2')
key = 'key1'
        # three supercolumns, each with a "col1" subcolumn
for i in range(1,4):
client.insert(key, ColumnParent('Super3', 'sc%d' % i), Column('col1', 'val1', 0), ConsistencyLevel.ONE)
cp = ColumnParent('Super3')
predicate = SlicePredicate(slice_range=SliceRange('sc1', 'sc3', False, count=1))
k_range = KeyRange(start_key=key, end_key=key, count=1)
# validate count=1 restricts to 1 supercolumn
result = client.get_range_slices(cp, predicate, k_range, ConsistencyLevel.ONE)
assert len(result[0].columns) == 1
# remove sc1; add back subcolumn to override tombstone
client.remove(key, ColumnPath('Super3', 'sc1'), 1, ConsistencyLevel.ONE)
result = client.get_range_slices(cp, predicate, k_range, ConsistencyLevel.ONE)
assert len(result[0].columns) == 1
client.insert(key, ColumnParent('Super3', 'sc1'), Column('col1', 'val1', 2), ConsistencyLevel.ONE)
result = client.get_range_slices(cp, predicate, k_range, ConsistencyLevel.ONE)
assert len(result[0].columns) == 1, result[0].columns
assert result[0].columns[0].super_column.name == 'sc1'
class TestTruncate(ThriftTester):
def test_truncate(self):
_set_keyspace('Keyspace1')
_insert_simple()
_insert_super()
# truncate Standard1
client.truncate('Standard1')
assert _big_slice('key1', ColumnParent('Standard1')) == []
# truncate Super1
client.truncate('Super1')
assert _big_slice('key1', ColumnParent('Super1')) == []
assert _big_slice('key1', ColumnParent('Super1', 'sc1')) == []
| {
"content_hash": "55b3fb7517698156b844ce6defb08ae2",
"timestamp": "",
"source": "github",
"line_count": 1966,
"max_line_length": 623,
"avg_line_length": 51.45269582909461,
"alnum_prop": 0.6208035114081221,
"repo_name": "nunezro2/cassandra_cs597",
"id": "8fecd2997aa14ee68892c235f02c5d3acf7c0b65",
"size": "102076",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "test/system/test_thrift_server.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Java",
"bytes": "10149969"
},
{
"name": "Python",
"bytes": "1608366"
},
{
"name": "Shell",
"bytes": "76440"
}
],
"symlink_target": ""
} |
"""
desispec.scripts.average_fiberflat
==================================
"""
from __future__ import absolute_import, division
import time
import numpy as np
from desiutil.log import get_logger
from desispec.io import read_fiberflat,write_fiberflat
from desispec.fiberflat import average_fiberflat
import argparse
def parse(options=None):
parser = argparse.ArgumentParser(description="Average fiber flats for a camera")
parser.add_argument('-i','--infile', type = str, default = None, required=True, nargs="*")
parser.add_argument('-o','--outfile', type = str, default = None, required=True)
parser.add_argument('--program', type = str, default = None, required=False,
help="only use inputs with this PROGRAM header keyword")
args = parser.parse_args(options)
return args
def main(args=None) :
if not isinstance(args, argparse.Namespace):
args = parse(args)
log=get_logger()
log.info("starting at {}".format(time.asctime()))
inputs=[]
for filename in args.infile :
inflat=read_fiberflat(filename)
if args.program is not None :
if args.program != inflat.header["PROGRAM"] :
log.info("skip {}".format(filename))
continue
        inputs.append(inflat)  # reuse the fiberflat already read above instead of reading the file twice
fiberflat = average_fiberflat(inputs)
write_fiberflat(args.outfile,fiberflat)
log.info("successfully wrote %s"%args.outfile)
| {
"content_hash": "68649322ae55c0e22818c54883a97eb5",
"timestamp": "",
"source": "github",
"line_count": 46,
"max_line_length": 94,
"avg_line_length": 31.717391304347824,
"alnum_prop": 0.6511309115832762,
"repo_name": "desihub/desispec",
"id": "f6b42aae90572f7c666addc4393b3383aa76d977",
"size": "1459",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "py/desispec/scripts/average_fiberflat.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "761"
},
{
"name": "Python",
"bytes": "4219435"
},
{
"name": "Shell",
"bytes": "17927"
}
],
"symlink_target": ""
} |
from test_framework.test_framework import KoreTestFramework
from test_framework.util import *
class WalletTest (KoreTestFramework):
def check_fee_amount(self, curr_balance, balance_with_fee, fee_per_byte, tx_size):
"""Return curr_balance after asserting the fee was in range"""
fee = balance_with_fee - curr_balance
target_fee = fee_per_byte * tx_size
if fee < target_fee:
raise AssertionError("Fee of %s BTC too low! (Should be %s BTC)"%(str(fee), str(target_fee)))
# allow the node's estimation to be at most 2 bytes off
if fee > fee_per_byte * (tx_size + 2):
raise AssertionError("Fee of %s BTC too high! (Should be %s BTC)"%(str(fee), str(target_fee)))
return curr_balance
def setup_chain(self):
print("Initializing test directory "+self.options.tmpdir)
initialize_chain_clean(self.options.tmpdir, 4)
def setup_network(self, split=False):
self.nodes = start_nodes(3, self.options.tmpdir)
connect_nodes_bi(self.nodes,0,1)
connect_nodes_bi(self.nodes,1,2)
connect_nodes_bi(self.nodes,0,2)
self.is_network_split=False
self.sync_all()
def run_test (self):
        # Check that there's no UTXO on any of the nodes
assert_equal(len(self.nodes[0].listunspent()), 0)
assert_equal(len(self.nodes[1].listunspent()), 0)
assert_equal(len(self.nodes[2].listunspent()), 0)
print("Mining blocks...")
self.nodes[0].generate(1)
walletinfo = self.nodes[0].getwalletinfo()
assert_equal(walletinfo['immature_balance'], 50)
assert_equal(walletinfo['balance'], 0)
self.sync_all()
self.nodes[1].generate(101)
self.sync_all()
assert_equal(self.nodes[0].getbalance(), 50)
assert_equal(self.nodes[1].getbalance(), 50)
assert_equal(self.nodes[2].getbalance(), 0)
# Check that only first and second nodes have UTXOs
assert_equal(len(self.nodes[0].listunspent()), 1)
assert_equal(len(self.nodes[1].listunspent()), 1)
assert_equal(len(self.nodes[2].listunspent()), 0)
# Send 21 BTC from 0 to 2 using sendtoaddress call.
# Second transaction will be child of first, and will require a fee
self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), 11)
self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), 10)
walletinfo = self.nodes[0].getwalletinfo()
assert_equal(walletinfo['immature_balance'], 0)
# Have node0 mine a block, thus it will collect its own fee.
self.nodes[0].generate(1)
self.sync_all()
# Exercise locking of unspent outputs
unspent_0 = self.nodes[2].listunspent()[0]
unspent_0 = {"txid": unspent_0["txid"], "vout": unspent_0["vout"]}
self.nodes[2].lockunspent(False, [unspent_0])
assert_raises(JSONRPCException, self.nodes[2].sendtoaddress, self.nodes[2].getnewaddress(), 20)
assert_equal([unspent_0], self.nodes[2].listlockunspent())
self.nodes[2].lockunspent(True, [unspent_0])
assert_equal(len(self.nodes[2].listlockunspent()), 0)
# Have node1 generate 100 blocks (so node0 can recover the fee)
self.nodes[1].generate(100)
self.sync_all()
# node0 should end up with 100 btc in block rewards plus fees, but
# minus the 21 plus fees sent to node2
assert_equal(self.nodes[0].getbalance(), 100-21)
assert_equal(self.nodes[2].getbalance(), 21)
# Node0 should have two unspent outputs.
# Create a couple of transactions to send them to node2, submit them through
# node1, and make sure both node0 and node2 pick them up properly:
node0utxos = self.nodes[0].listunspent(1)
assert_equal(len(node0utxos), 2)
# create both transactions
txns_to_send = []
for utxo in node0utxos:
inputs = []
outputs = {}
inputs.append({ "txid" : utxo["txid"], "vout" : utxo["vout"]})
outputs[self.nodes[2].getnewaddress("from1")] = utxo["amount"]
raw_tx = self.nodes[0].createrawtransaction(inputs, outputs)
txns_to_send.append(self.nodes[0].signrawtransaction(raw_tx))
# Have node 1 (miner) send the transactions
self.nodes[1].sendrawtransaction(txns_to_send[0]["hex"], True)
self.nodes[1].sendrawtransaction(txns_to_send[1]["hex"], True)
# Have node1 mine a block to confirm transactions:
self.nodes[1].generate(1)
self.sync_all()
assert_equal(self.nodes[0].getbalance(), 0)
assert_equal(self.nodes[2].getbalance(), 100)
assert_equal(self.nodes[2].getbalance("from1"), 100-21)
# Send 10 BTC normal
address = self.nodes[0].getnewaddress("test")
fee_per_byte = Decimal('0.001') / 1000
self.nodes[2].settxfee(fee_per_byte * 1000)
txid = self.nodes[2].sendtoaddress(address, 10, "", "", False)
self.nodes[2].generate(1)
self.sync_all()
node_2_bal = self.check_fee_amount(self.nodes[2].getbalance(), Decimal('90'), fee_per_byte, count_bytes(self.nodes[2].getrawtransaction(txid)))
assert_equal(self.nodes[0].getbalance(), Decimal('10'))
# Send 10 BTC with subtract fee from amount
txid = self.nodes[2].sendtoaddress(address, 10, "", "", True)
self.nodes[2].generate(1)
self.sync_all()
node_2_bal -= Decimal('10')
assert_equal(self.nodes[2].getbalance(), node_2_bal)
node_0_bal = self.check_fee_amount(self.nodes[0].getbalance(), Decimal('20'), fee_per_byte, count_bytes(self.nodes[2].getrawtransaction(txid)))
# Sendmany 10 BTC
txid = self.nodes[2].sendmany('from1', {address: 10}, 0, "", [])
self.nodes[2].generate(1)
self.sync_all()
node_0_bal += Decimal('10')
node_2_bal = self.check_fee_amount(self.nodes[2].getbalance(), node_2_bal - Decimal('10'), fee_per_byte, count_bytes(self.nodes[2].getrawtransaction(txid)))
assert_equal(self.nodes[0].getbalance(), node_0_bal)
# Sendmany 10 BTC with subtract fee from amount
txid = self.nodes[2].sendmany('from1', {address: 10}, 0, "", [address])
self.nodes[2].generate(1)
self.sync_all()
node_2_bal -= Decimal('10')
assert_equal(self.nodes[2].getbalance(), node_2_bal)
node_0_bal = self.check_fee_amount(self.nodes[0].getbalance(), node_0_bal + Decimal('10'), fee_per_byte, count_bytes(self.nodes[2].getrawtransaction(txid)))
# Test ResendWalletTransactions:
# Create a couple of transactions, then start up a fourth
# node (nodes[3]) and ask nodes[0] to rebroadcast.
# EXPECT: nodes[3] should have those transactions in its mempool.
txid1 = self.nodes[0].sendtoaddress(self.nodes[1].getnewaddress(), 1)
txid2 = self.nodes[1].sendtoaddress(self.nodes[0].getnewaddress(), 1)
sync_mempools(self.nodes)
self.nodes.append(start_node(3, self.options.tmpdir))
connect_nodes_bi(self.nodes, 0, 3)
sync_blocks(self.nodes)
relayed = self.nodes[0].resendwallettransactions()
assert_equal(set(relayed), {txid1, txid2})
sync_mempools(self.nodes)
assert(txid1 in self.nodes[3].getrawmempool())
# Exercise balance rpcs
assert_equal(self.nodes[0].getwalletinfo()["unconfirmed_balance"], 1)
assert_equal(self.nodes[0].getunconfirmedbalance(), 1)
#check if we can list zero value tx as available coins
#1. create rawtx
#2. hex-changed one output to 0.0
#3. sign and send
#4. check if recipient (node0) can list the zero value tx
usp = self.nodes[1].listunspent()
inputs = [{"txid":usp[0]['txid'], "vout":usp[0]['vout']}]
outputs = {self.nodes[1].getnewaddress(): 49.998, self.nodes[0].getnewaddress(): 11.11}
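        # 11.11 serializes as 1,111,000,000 base units = 0x423883c0, i.e. little-endian bytes "c0833842";
        # patching those four bytes to zero below turns the 11.11 output into a zero-value output.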
rawTx = self.nodes[1].createrawtransaction(inputs, outputs).replace("c0833842", "00000000") #replace 11.11 with 0.0 (int32)
decRawTx = self.nodes[1].decoderawtransaction(rawTx)
signedRawTx = self.nodes[1].signrawtransaction(rawTx)
decRawTx = self.nodes[1].decoderawtransaction(signedRawTx['hex'])
zeroValueTxid= decRawTx['txid']
sendResp = self.nodes[1].sendrawtransaction(signedRawTx['hex'])
self.sync_all()
self.nodes[1].generate(1) #mine a block
self.sync_all()
unspentTxs = self.nodes[0].listunspent() #zero value tx must be in listunspents output
found = False
for uTx in unspentTxs:
if uTx['txid'] == zeroValueTxid:
found = True
assert_equal(uTx['amount'], Decimal('0'))
assert(found)
#do some -walletbroadcast tests
stop_nodes(self.nodes)
wait_koreds()
self.nodes = start_nodes(3, self.options.tmpdir, [["-walletbroadcast=0"],["-walletbroadcast=0"],["-walletbroadcast=0"]])
connect_nodes_bi(self.nodes,0,1)
connect_nodes_bi(self.nodes,1,2)
connect_nodes_bi(self.nodes,0,2)
self.sync_all()
txIdNotBroadcasted = self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), 2)
txObjNotBroadcasted = self.nodes[0].gettransaction(txIdNotBroadcasted)
self.nodes[1].generate(1) #mine a block, tx should not be in there
self.sync_all()
assert_equal(self.nodes[2].getbalance(), node_2_bal) #should not be changed because tx was not broadcasted
#now broadcast from another node, mine a block, sync, and check the balance
self.nodes[1].sendrawtransaction(txObjNotBroadcasted['hex'])
self.nodes[1].generate(1)
self.sync_all()
node_2_bal += 2
txObjNotBroadcasted = self.nodes[0].gettransaction(txIdNotBroadcasted)
assert_equal(self.nodes[2].getbalance(), node_2_bal)
#create another tx
txIdNotBroadcasted = self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), 2)
#restart the nodes with -walletbroadcast=1
stop_nodes(self.nodes)
wait_koreds()
self.nodes = start_nodes(3, self.options.tmpdir)
connect_nodes_bi(self.nodes,0,1)
connect_nodes_bi(self.nodes,1,2)
connect_nodes_bi(self.nodes,0,2)
sync_blocks(self.nodes)
self.nodes[0].generate(1)
sync_blocks(self.nodes)
node_2_bal += 2
        #tx should be added to balance because after restarting the nodes the tx should be broadcast
assert_equal(self.nodes[2].getbalance(), node_2_bal)
#send a tx with value in a string (PR#6380 +)
txId = self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), "2")
txObj = self.nodes[0].gettransaction(txId)
assert_equal(txObj['amount'], Decimal('-2'))
txId = self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), "0.0001")
txObj = self.nodes[0].gettransaction(txId)
assert_equal(txObj['amount'], Decimal('-0.0001'))
#check if JSON parser can handle scientific notation in strings
txId = self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), "1e-4")
txObj = self.nodes[0].gettransaction(txId)
assert_equal(txObj['amount'], Decimal('-0.0001'))
try:
txId = self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), "1f-4")
except JSONRPCException as e:
assert("Invalid amount" in e.error['message'])
else:
raise AssertionError("Must not parse invalid amounts")
try:
self.nodes[0].generate("2")
raise AssertionError("Must not accept strings as numeric")
except JSONRPCException as e:
assert("not an integer" in e.error['message'])
# Import address and private key to check correct behavior of spendable unspents
# 1. Send some coins to generate new UTXO
address_to_import = self.nodes[2].getnewaddress()
txid = self.nodes[0].sendtoaddress(address_to_import, 1)
self.nodes[0].generate(1)
self.sync_all()
# 2. Import address from node2 to node1
self.nodes[1].importaddress(address_to_import)
# 3. Validate that the imported address is watch-only on node1
assert(self.nodes[1].validateaddress(address_to_import)["iswatchonly"])
# 4. Check that the unspents after import are not spendable
assert_array_result(self.nodes[1].listunspent(),
{"address": address_to_import},
{"spendable": False})
# 5. Import private key of the previously imported address on node1
priv_key = self.nodes[2].dumpprivkey(address_to_import)
self.nodes[1].importprivkey(priv_key)
# 6. Check that the unspents are now spendable on node1
assert_array_result(self.nodes[1].listunspent(),
{"address": address_to_import},
{"spendable": True})
        #check if wallet or blockchain maintenance changes the balance
self.sync_all()
blocks = self.nodes[0].generate(2)
self.sync_all()
balance_nodes = [self.nodes[i].getbalance() for i in range(3)]
block_count = self.nodes[0].getblockcount()
maintenance = [
'-rescan',
'-reindex',
'-zapwallettxes=1',
'-zapwallettxes=2',
'-salvagewallet',
]
for m in maintenance:
print("check " + m)
stop_nodes(self.nodes)
wait_koreds()
self.nodes = start_nodes(3, self.options.tmpdir, [[m]] * 3)
while m == '-reindex' and [block_count] * 3 != [self.nodes[i].getblockcount() for i in range(3)]:
                # -reindex finishes RPC warmup "early", before reindexing completes; wait for the block count to catch up
time.sleep(0.1)
assert_equal(balance_nodes, [self.nodes[i].getbalance() for i in range(3)])
# Exercise listsinceblock with the last two blocks
coinbase_tx_1 = self.nodes[0].listsinceblock(blocks[0])
assert_equal(coinbase_tx_1["lastblock"], blocks[1])
assert_equal(len(coinbase_tx_1["transactions"]), 1)
assert_equal(coinbase_tx_1["transactions"][0]["blockhash"], blocks[1])
assert_equal(len(self.nodes[0].listsinceblock(blocks[1])["transactions"]), 0)
if __name__ == '__main__':
WalletTest ().main ()
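# This test is normally launched through the framework's rpc-tests harness, which supplies the
# node binaries and a --tmpdir; the exact wrapper script depends on the local checkout.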
| {
"content_hash": "25bc6ac6d3b86a155e9fd85989dd286e",
"timestamp": "",
"source": "github",
"line_count": 328,
"max_line_length": 164,
"avg_line_length": 44.22256097560975,
"alnum_prop": 0.6181316787314719,
"repo_name": "Kore-Core/kore",
"id": "f81c3cdfc4c0cda3e0b74c4cbd0f1cc33727a212",
"size": "14717",
"binary": false,
"copies": "1",
"ref": "refs/heads/momentum",
"path": "qa/rpc-tests/wallet.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "5751739"
},
{
"name": "C++",
"bytes": "5106026"
},
{
"name": "CSS",
"bytes": "43192"
},
{
"name": "HTML",
"bytes": "50621"
},
{
"name": "Java",
"bytes": "2097"
},
{
"name": "M4",
"bytes": "146760"
},
{
"name": "Makefile",
"bytes": "99440"
},
{
"name": "Objective-C",
"bytes": "4343"
},
{
"name": "Objective-C++",
"bytes": "7226"
},
{
"name": "Python",
"bytes": "680485"
},
{
"name": "QMake",
"bytes": "2017"
},
{
"name": "Roff",
"bytes": "3687"
},
{
"name": "Shell",
"bytes": "35590"
}
],
"symlink_target": ""
} |
"""Support for ESPHome fans."""
from __future__ import annotations
import math
from aioesphomeapi import FanDirection, FanInfo, FanSpeed, FanState
from homeassistant.components.fan import (
DIRECTION_FORWARD,
DIRECTION_REVERSE,
SUPPORT_DIRECTION,
SUPPORT_OSCILLATE,
SUPPORT_SET_SPEED,
FanEntity,
)
from homeassistant.config_entries import ConfigEntry
from homeassistant.helpers.typing import HomeAssistantType
from homeassistant.util.percentage import (
ordered_list_item_to_percentage,
percentage_to_ordered_list_item,
percentage_to_ranged_value,
ranged_value_to_percentage,
)
from . import (
EsphomeEntity,
esphome_map_enum,
esphome_state_property,
platform_async_setup_entry,
)
ORDERED_NAMED_FAN_SPEEDS = [FanSpeed.LOW, FanSpeed.MEDIUM, FanSpeed.HIGH]
async def async_setup_entry(
hass: HomeAssistantType, entry: ConfigEntry, async_add_entities
) -> None:
"""Set up ESPHome fans based on a config entry."""
await platform_async_setup_entry(
hass,
entry,
async_add_entities,
component_key="fan",
info_type=FanInfo,
entity_type=EsphomeFan,
state_type=FanState,
)
@esphome_map_enum
def _fan_directions():
return {
FanDirection.FORWARD: DIRECTION_FORWARD,
FanDirection.REVERSE: DIRECTION_REVERSE,
}
class EsphomeFan(EsphomeEntity, FanEntity):
"""A fan implementation for ESPHome."""
@property
def _static_info(self) -> FanInfo:
return super()._static_info
@property
def _state(self) -> FanState | None:
return super()._state
@property
def _supports_speed_levels(self) -> bool:
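        # Devices speaking API 1.4 or newer report a supported_speed_levels count; older ones only expose the fixed LOW/MEDIUM/HIGH speeds.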
api_version = self._client.api_version
return api_version.major == 1 and api_version.minor > 3
async def async_set_percentage(self, percentage: int) -> None:
"""Set the speed percentage of the fan."""
if percentage == 0:
await self.async_turn_off()
return
data = {"key": self._static_info.key, "state": True}
if percentage is not None:
if self._supports_speed_levels:
data["speed_level"] = math.ceil(
percentage_to_ranged_value(
(1, self._static_info.supported_speed_levels), percentage
)
)
else:
named_speed = percentage_to_ordered_list_item(
ORDERED_NAMED_FAN_SPEEDS, percentage
)
data["speed"] = named_speed
await self._client.fan_command(**data)
async def async_turn_on(
self,
speed: str | None = None,
percentage: int | None = None,
preset_mode: str | None = None,
**kwargs,
) -> None:
"""Turn on the fan."""
await self.async_set_percentage(percentage)
async def async_turn_off(self, **kwargs) -> None:
"""Turn off the fan."""
await self._client.fan_command(key=self._static_info.key, state=False)
async def async_oscillate(self, oscillating: bool) -> None:
"""Oscillate the fan."""
await self._client.fan_command(
key=self._static_info.key, oscillating=oscillating
)
async def async_set_direction(self, direction: str):
"""Set direction of the fan."""
await self._client.fan_command(
key=self._static_info.key, direction=_fan_directions.from_hass(direction)
)
# https://github.com/PyCQA/pylint/issues/3150 for all @esphome_state_property
# pylint: disable=invalid-overridden-method
@esphome_state_property
def is_on(self) -> bool | None:
"""Return true if the entity is on."""
return self._state.state
@esphome_state_property
def percentage(self) -> int | None:
"""Return the current speed percentage."""
if not self._static_info.supports_speed:
return None
if not self._supports_speed_levels:
return ordered_list_item_to_percentage(
ORDERED_NAMED_FAN_SPEEDS, self._state.speed
)
return ranged_value_to_percentage(
(1, self._static_info.supported_speed_levels), self._state.speed_level
)
@property
def speed_count(self) -> int:
"""Return the number of speeds the fan supports."""
if not self._supports_speed_levels:
return len(ORDERED_NAMED_FAN_SPEEDS)
return self._static_info.supported_speed_levels
@esphome_state_property
    def oscillating(self) -> bool | None:
"""Return the oscillation state."""
if not self._static_info.supports_oscillation:
return None
return self._state.oscillating
@esphome_state_property
    def current_direction(self) -> str | None:
"""Return the current fan direction."""
if not self._static_info.supports_direction:
return None
return _fan_directions.from_esphome(self._state.direction)
@property
def supported_features(self) -> int:
"""Flag supported features."""
flags = 0
if self._static_info.supports_oscillation:
flags |= SUPPORT_OSCILLATE
if self._static_info.supports_speed:
flags |= SUPPORT_SET_SPEED
if self._static_info.supports_direction:
flags |= SUPPORT_DIRECTION
return flags
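# Illustrative sketch (not part of the integration) of how the percentage helpers above behave,
# assuming the standard Home Assistant utilities: for a device with 4 speed levels,
# percentage_to_ranged_value((1, 4), 50) returns 2.0 and math.ceil() maps it to level 2, while
# ranged_value_to_percentage((1, 4), 2) maps level 2 back to 50%. Legacy devices instead
# round-trip through the three named speeds in ORDERED_NAMED_FAN_SPEEDS.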
| {
"content_hash": "48d5546ba7142a82a5ce375c8c9fbe1d",
"timestamp": "",
"source": "github",
"line_count": 175,
"max_line_length": 85,
"avg_line_length": 30.931428571428572,
"alnum_prop": 0.6155551450212452,
"repo_name": "adrienbrault/home-assistant",
"id": "5d7cf24f2c518366e8bdd684b7219a5b8a438758",
"size": "5413",
"binary": false,
"copies": "1",
"ref": "refs/heads/dev",
"path": "homeassistant/components/esphome/fan.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "1795"
},
{
"name": "Python",
"bytes": "32021043"
},
{
"name": "Shell",
"bytes": "4900"
}
],
"symlink_target": ""
} |
from os import path
import mock
import six
import yaml
from cinder.volume.drivers.dell_emc.vnx import client
from cinder.volume.drivers.dell_emc.vnx import common
patch_sleep = mock.patch('time.sleep')
patch_vnxsystem = mock.patch('storops.VNXSystem')
def load_yaml(file_name):
yaml_file = '{}/{}'.format(path.dirname(
path.abspath(__file__)), file_name)
with open(yaml_file) as f:
        res = yaml.safe_load(f)  # safe_load avoids arbitrary object construction and the bare-load deprecation warning
return res
def patch_extra_specs(specs):
return _build_patch_decorator(
'cinder.volume.volume_types.get_volume_type_extra_specs',
return_value=specs)
def patch_extra_specs_validate(return_value=None, side_effect=None):
return _build_patch_decorator(
'cinder.volume.drivers.dell_emc.vnx.common.ExtraSpecs.validate',
return_value=return_value,
side_effect=side_effect)
def _build_patch_decorator(module_str, return_value=None, side_effect=None):
def _inner_mock(func):
@six.wraps(func)
def decorator(*args, **kwargs):
with mock.patch(
module_str,
return_value=return_value,
side_effect=side_effect):
return func(*args, **kwargs)
return decorator
return _inner_mock
def build_fake_mirror_view():
primary_client = mock.create_autospec(spec=client.Client)
secondary_client = mock.create_autospec(spec=client.Client)
mirror_view = mock.create_autospec(spec=common.VNXMirrorView)
mirror_view.primary_client = primary_client
mirror_view.secondary_client = secondary_client
return mirror_view
def get_replication_device():
return {
'backend_id': 'fake_serial',
'san_ip': '192.168.1.12',
'san_login': 'admin',
'san_password': 'admin',
'storage_vnx_authentication_type': 'global',
'storage_vnx_security_file_dir': None,
}
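# Illustrative usage sketch (hypothetical test, not part of this module): the patch helpers above
# return decorators that mock out extra-spec lookups for a single test method, e.g.
#
#     @patch_extra_specs({'provisioning:type': 'thin'})
#     def test_create_thin_volume(self):
#         ...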
| {
"content_hash": "7a365d8fc711e2de734a8c2d10a68cbd",
"timestamp": "",
"source": "github",
"line_count": 69,
"max_line_length": 76,
"avg_line_length": 27.63768115942029,
"alnum_prop": 0.6465652857891977,
"repo_name": "ge0rgi/cinder",
"id": "f771f99d64425506fb839c3cbd1632afc39184e7",
"size": "2522",
"binary": false,
"copies": "1",
"ref": "refs/heads/stable/ocata",
"path": "cinder/tests/unit/volume/drivers/dell_emc/vnx/utils.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "19963591"
}
],
"symlink_target": ""
} |
from setuptools import setup
import django_sha2
setup(
name='django-sha2',
version=django_sha2.__version__,
description='Enable strong password hashes (bcrypt+hmac or SHA-2) in Django by default.',
long_description=open('README.md').read(),
author='Fred Wenzel',
author_email='[email protected]',
url='http://github.com/fwenzel/django-sha2',
license='BSD',
packages=['django_sha2'],
include_package_data=True,
zip_safe=False,
install_requires=['Django>=1.2'],
classifiers=[
'Development Status :: 4 - Beta',
'Environment :: Web Environment',
'Environment :: Web Environment :: Mozilla',
'Framework :: Django',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Topic :: Software Development :: Libraries :: Python Modules',
]
)
| {
"content_hash": "08c12dba8dabd4863031d3ddbe83c03b",
"timestamp": "",
"source": "github",
"line_count": 30,
"max_line_length": 93,
"avg_line_length": 32,
"alnum_prop": 0.6302083333333334,
"repo_name": "brianloveswords/django-sha2",
"id": "3532ed238e1d73e41013dfb786c29e5a57bd7c7f",
"size": "960",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "7257"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from .config import *
from .wrappers import DictWrapper, Wrapper
class Networks(DictWrapper):
def __init__(self, resource, client, populate=False):
super(Networks, self).__init__(resource, client, populate=populate)
def wrap(self, network):
return Network(network, self.client)
def key_for(self, network):
return network.resource.name
class Network(Wrapper):
@property
def current_fee(self):
return self.recommended_fee_per_kb
| {
"content_hash": "eec9cbe6c7c86e0cf02616f5bd945bbf",
"timestamp": "",
"source": "github",
"line_count": 23,
"max_line_length": 75,
"avg_line_length": 22.956521739130434,
"alnum_prop": 0.6856060606060606,
"repo_name": "GemHQ/round-py",
"id": "dfaace36447663bcbd972283b0ff39dda315db51",
"size": "614",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "round/networks.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "71021"
}
],
"symlink_target": ""
} |
"""
Copyright [Participants AERIUS Masterclass: Marco Duiker, Peer vd Sande, Alex Bouthoorn, Laurens vd Burgt, Rik Zegers, Lotte Dijk, Kaj Fabri, Esther Kokmeyer, Christa Blokhuis, Anneke Donkersloot, Japer Harbers, Roy Laurijsse, Luc Meegens, Marike Aalbers]
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import csv
from gml_templating_lib import render_gml_from_IMAERtemplate, IMAERgeometry
data = {
'year': 2020,
'version': 'BETA11_20150421_7cd628b230',
'dbVersion': 'BETA11_20150421_7cd628b230',
'situationName': 'bouwfase2020'
}
old_gmlId = 'a bogus string which will never be used as an id'
data['featureMembers'] = []
with open('example.csv', 'rt') as csvFile:
reader = csv.DictReader(csvFile)
for row in reader:
if not old_gmlId == row['gmlId']:
# add a complete new featureMember
old_gmlId = row['gmlId']
data['featureMembers'].append({
'sectorId': row['sectorId'],
'gmlId': row['gmlId'],
'label': row['label'],
'emissionHeight': row['emissionHeight'],
'heatContent': row['heatContent'],
'geometry': str(IMAERgeometry(row['WKT'],row['gmlId'])),
'farmLodgings': [{
'farmLodgingType': row['farmLodgingType'],
'numberOfAnimals': row['numberOfAnimals'],
'farmLodgingSystemDefinitionType': row['farmLodgingSystemDefinitionType']
}]
})
else:
            # add a farmLodging to the last featureMember
            # this assumes that the data in the csv file is ordered by gmlId
data['featureMembers'][-1]['farmLodgings'].append({
'farmLodgingType': row['farmLodgingType'],
'numberOfAnimals': row['numberOfAnimals'],
'farmLodgingSystemDefinitionType': row['farmLodgingSystemDefinitionType']
})
print(render_gml_from_IMAERtemplate(".", "feature_members_template.gml", **data).strip())
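# A sketch of the grouping the loop above produces, using hypothetical rows
# (example.csv is assumed to be ordered by gmlId, as the comments note):
#   gmlId  farmLodgingType  numberOfAnimals
#   NL.1   A1.100           40   -> new featureMember with one farmLodging
#   NL.1   A1.101           25   -> same gmlId: appended to that farmLodgings list
#   NL.2   B2.300           10   -> new gmlId: a second featureMember is created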
| {
"content_hash": "d7e6f4880ab0a11295e610fe11f1486e",
"timestamp": "",
"source": "github",
"line_count": 58,
"max_line_length": 255,
"avg_line_length": 43.706896551724135,
"alnum_prop": 0.6418145956607495,
"repo_name": "aerius/aerius-imaer-templates",
"id": "0b81037806a9afb35ab380955614054203a8af93",
"size": "2535",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "farmLodging/run_me.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Game Maker Language",
"bytes": "11848"
},
{
"name": "Python",
"bytes": "63022"
}
],
"symlink_target": ""
} |
"""Productions settings"""
from __future__ import absolute_import
from os import environ
from .base import *
# adding exception for improper configured settings
from django.core.exceptions import ImproperlyConfigured
def get_env_setting(setting):
try:
return environ[setting]
except KeyError:
error_msg = "Set the %s env variable" % setting
raise ImproperlyConfigured(error_msg)
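# Example use of the helper above (a sketch: SECRET_KEY is only an assumed
# environment variable name and is not referenced elsewhere in this file):
#   SECRET_KEY = get_env_setting('SECRET_KEY')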
# set allowed host, a must have configuration
ALLOWED_HOSTS = ['*']
# set databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.mysql',
'NAME': 'merdeka',
'USER': 'hackathon',
'PASSWORD': 'merdekadengancode',
'HOST': 'localhost',
}
}
| {
"content_hash": "2c08e67d725a6ac0fc8e1f88e6ccef71",
"timestamp": "",
"source": "github",
"line_count": 31,
"max_line_length": 55,
"avg_line_length": 23,
"alnum_prop": 0.6549789621318373,
"repo_name": "kamar42/merdeka-backend",
"id": "619ec8edb0b4518c24ad35abdb50d10cde21422c",
"size": "713",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "merdeka/settings/production.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "156"
},
{
"name": "Python",
"bytes": "41181"
}
],
"symlink_target": ""
} |
"""
@package mi.dataset.driver.fuelcell_eng.dcl
@file mi-dataset/mi/dataset/driver/fuelcell_eng/dcl/fuelcell_eng_dcl_recovered_driver.py
@author Chris Goodrich
@brief Recovered driver for the fuelcell_eng_dcl instrument
Release notes:
Initial Release
"""
from mi.dataset.dataset_driver import SimpleDatasetDriver
from mi.dataset.dataset_parser import DataSetDriverConfigKeys
from mi.dataset.parser.fuelcell_eng_dcl import FuelCellEngDclParticleClassKey,\
FuelCellEngDclDataParticleRecovered
from mi.dataset.parser.fuelcell_eng_dcl import FuelCellEngDclParser
from mi.core.versioning import version
@version("15.6.0")
def parse(basePythonCodePath, sourceFilePath, particleDataHdlrObj):
"""
This is the method called by Uframe
:param basePythonCodePath This is the file system location of mi-dataset
:param sourceFilePath This is the full path and filename of the file to be parsed
:param particleDataHdlrObj Java Object to consume the output of the parser
:return particleDataHdlrObj
"""
with open(sourceFilePath, 'rU') as stream_handle:
        # create an instance of the concrete driver class defined below
driver = FuelCellEngDclRecoveredDriver(basePythonCodePath, stream_handle, particleDataHdlrObj)
driver.processFileStream()
return particleDataHdlrObj
class FuelCellEngDclRecoveredDriver(SimpleDatasetDriver):
"""
The fuelcell_eng_dcl driver class extends the SimpleDatasetDriver.
All this needs to do is create a concrete _build_parser method
"""
def _build_parser(self, stream_handle):
self.parser_config = {
DataSetDriverConfigKeys.PARTICLE_MODULE: 'mi.dataset.parser.fuelcell_eng_dcl',
DataSetDriverConfigKeys.PARTICLE_CLASS: None,
DataSetDriverConfigKeys.PARTICLE_CLASSES_DICT: {
FuelCellEngDclParticleClassKey.ENGINEERING_DATA_PARTICLE_CLASS: FuelCellEngDclDataParticleRecovered
}
}
parser = FuelCellEngDclParser(self.parser_config,
stream_handle,
self._exception_callback)
return parser
| {
"content_hash": "665c3c185f04ced99b1f6a6d6ec078d2",
"timestamp": "",
"source": "github",
"line_count": 59,
"max_line_length": 115,
"avg_line_length": 36.66101694915254,
"alnum_prop": 0.729079981507166,
"repo_name": "JeffRoy/mi-dataset",
"id": "ac1dcd0f133afed6e21e8fc18e2b58a962f16a3c",
"size": "2186",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "mi/dataset/driver/fuelcell_eng/dcl/fuelcell_eng_dcl_recovered_driver.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "3610231"
}
],
"symlink_target": ""
} |
"""
Unit test for EC2 ipa.
"""
import unittest
import mock
from treadmill.infra.setup.ipa import IPA
from treadmill.infra import constants
class IPATest(unittest.TestCase):
"""Tests EC2 ipa setup."""
@mock.patch('time.time', mock.Mock(return_value=1000))
@mock.patch('treadmill.infra.subnet.Subnet')
@mock.patch('treadmill.infra.get_iam_role')
@mock.patch('treadmill.infra.configuration.IPA')
@mock.patch('treadmill.infra.connection.Connection')
@mock.patch('treadmill.infra.vpc.VPC')
@mock.patch('treadmill.infra.instances.Instances')
def test_setup_ipa(self, InstancesMock,
VPCMock, ConnectionMock, IPAConfigurationMock,
get_iam_role_mock, SubnetMock):
ConnectionMock.context.domain = 'foo.bar'
instance_mock = mock.Mock(metadata={'PrivateIpAddress': '1.1.1.1'})
instance_mock.name = 'ipa'
instance_mock.running_status = mock.Mock(return_value='passed')
instances_mock = mock.Mock(instances=[instance_mock])
InstancesMock.create = mock.Mock(return_value=instances_mock)
conn_mock = ConnectionMock('route53')
_vpc_id_mock = 'vpc-id'
_vpc_mock = VPCMock(id=_vpc_id_mock)
_vpc_mock.secgroup_ids = ['secgroup_id']
_vpc_mock.gateway_ids = [123]
conn_mock.describe_instance_status = mock.Mock(
return_value={
'InstanceStatuses': [
{'InstanceStatus': {'Details': [{'Status': 'passed'}]}}
]
}
)
_private_ip = '1.1.1.1'
_subnet_mock = mock.Mock(
persisted=False,
id='subnet-id',
vpc_id=_vpc_id_mock,
name='subnet-name',
get_instances=mock.Mock(return_value=instances_mock)
)
SubnetMock.get = mock.Mock(return_value=_subnet_mock)
_ipa_configuration_mock = IPAConfigurationMock()
_ipa_configuration_mock.get_userdata = mock.Mock(
return_value='user-data-script'
)
ipa = IPA(
name='ipa',
vpc_id=_vpc_id_mock,
)
ipa.setup(
image='foo-123',
count=1,
cidr_block='cidr-block',
key='some-key',
tm_release='release',
ipa_admin_password='ipa-admin-password',
instance_type='small',
proid='foobar',
subnet_name='sub-name'
)
get_iam_role_mock.assert_called_once_with(
name=constants.IPA_EC2_IAM_ROLE,
create=True
)
instance_mock.running_status.assert_called_once_with(refresh=True)
_subnet_mock.refresh.assert_called()
_subnet_mock.get_instances.assert_called_once_with(
refresh=True,
role='IPA'
)
_vpc_mock.create_security_group.assert_called_once()
_vpc_mock.add_secgrp_rules.assert_called_once()
_vpc_mock.delete_dhcp_options.assert_called_once()
self.assertCountEqual(
_vpc_mock.associate_dhcp_options.mock_calls,
[
mock.mock.call(default=True),
mock.mock.call([{
'Key': 'domain-name-servers', 'Values': [_private_ip]
}])
]
)
self.assertEqual(ipa.subnet.instances, instances_mock)
InstancesMock.create.assert_called_once_with(
image='foo-123',
name='ipa1-1000.foo.bar',
count=1,
subnet_id='subnet-id',
instance_type='small',
key_name='some-key',
secgroup_ids=['secgroup_id'],
user_data='user-data-script',
role='IPA'
)
_vpc_mock.load_security_group_ids.assert_called_once_with(
sg_names=['sg_common', 'ipa_secgrp']
)
_subnet_mock.persist.assert_called_once_with(
cidr_block='cidr-block',
gateway_id=123
)
self.assertEqual(
IPAConfigurationMock.mock_calls[1],
mock.mock.call(
ipa_admin_password='ipa-admin-password',
tm_release='release',
hostname='ipa1-1000.foo.bar',
vpc=_vpc_mock,
proid='foobar'
)
)
_ipa_configuration_mock.get_userdata.assert_called_once()
@mock.patch('treadmill.infra.subnet.Subnet')
@mock.patch('treadmill.infra.connection.Connection')
@mock.patch('treadmill.infra.vpc.VPC')
def test_ipa_destroy(self, VPCMock, ConnectionMock, SubnetMock):
ConnectionMock.context.domain = 'foo.bar'
_subnet_mock = SubnetMock(
subnet_name='subnet-name'
)
_vpc_id_mock = 'vpc-id'
_vpc_mock = VPCMock(id=_vpc_id_mock)
_vpc_mock.secgroup_ids = ['secgroup_id']
_instance = mock.Mock(private_ip='1.1.1.1')
_instance.name = 'ipa'
_subnet_mock.instances = mock.Mock(instances=[
_instance
])
ipa = IPA(
vpc_id='vpc-id',
name='ipa-setup'
)
ipa.subnet = _subnet_mock
ipa.destroy(
subnet_name='subnet-name'
)
_subnet_mock.destroy.assert_called_once_with(role='IPA')
_vpc_mock.delete_security_groups.assert_called_once_with(
sg_names=['ipa_secgrp']
)
| {
"content_hash": "0d5aed82c5f36f01da16e4f142a68af2",
"timestamp": "",
"source": "github",
"line_count": 159,
"max_line_length": 75,
"avg_line_length": 34.0125786163522,
"alnum_prop": 0.5536242603550295,
"repo_name": "bretttegart/treadmill",
"id": "c1ea9262c9e85b91fc4ffa5d2a440667f789895e",
"size": "5408",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/infra/test_setup/test_ipa.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "PowerShell",
"bytes": "3656"
},
{
"name": "Python",
"bytes": "2975485"
},
{
"name": "Ruby",
"bytes": "3712"
},
{
"name": "Shell",
"bytes": "56911"
}
],
"symlink_target": ""
} |
from datetime import datetime, timedelta
from os import path, unlink
from django.conf import settings
from django.core.management.base import BaseCommand, CommandError
from django.db import close_old_connections
import olympia.core.logger
from olympia import amo
from olympia.addons.models import Addon
from olympia.files.models import File
from olympia.stats.models import DownloadCount, update_inc
from . import get_date, get_stats_data
log = olympia.core.logger.getLogger('adi.downloadcounts')
def is_valid_source(src, fulls, prefixes):
"""Return True if the source is valid.
A source is valid if it is in the list of valid full sources or prefixed by
a prefix in the list of valid prefix sources.
"""
return src in fulls or any(p in src for p in prefixes)
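# Illustration of the check above with made-up values (the real lists come from
# amo.DOWNLOAD_SOURCES_FULL and amo.DOWNLOAD_SOURCES_PREFIX, used further down):
#   is_valid_source('search', fulls=['search'], prefixes=[])       -> True
#   is_valid_source('dp-btn-primary', fulls=[], prefixes=['dp-'])  -> True
#   is_valid_source('bogus', fulls=['search'], prefixes=['dp-'])   -> False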
class Command(BaseCommand):
"""Update download count metrics from stats_source in the database.
Usage:
./manage.py download_counts_from_file \
<folder> --date=YYYY-MM-DD --stats_source={s3,file}
If no date is specified, the default is the day before.
If no stats_source is specified, the default is set to s3.
If stats_source is file:
        If no folder is specified, the default is `hive_results/YYYY-MM-DD/`.
This folder will be located in `<settings.SHARED_STORAGE>/tmp`.
If stats_source is s3:
This file will be located in
`<settings.AWS_STATS_S3_BUCKET>/<settings.AWS_STATS_S3_PREFIX>`.
File processed:
- download_counts/YYYY-MM-DD/000000_0
We get a row for each "addon download" request, in this format:
<count> <file id or add-on id or add-on slug> <click source>
We insert one DownloadCount entry per addon per day, and each row holds
the json-ified dict of click sources/counters.
Eg, for the above request:
date: <the date of the day the queries were made>
count: <the number of requests for this addon, for this day>
addon: <the addon that has this id>
src: {'dp-btn-primary': 1}
"""
help = __doc__
def add_arguments(self, parser):
"""Handle command arguments."""
parser.add_argument('folder_name', default='hive_results', nargs='?')
parser.add_argument(
'--stats_source', default='s3',
choices=['s3', 'file'],
help='Source of stats data')
parser.add_argument(
'--date', action='store', type=str,
dest='date', help='Date in the YYYY-MM-DD format.')
parser.add_argument(
'--separator', action='store', type=str, default='\t',
dest='separator', help='Field separator in file.')
def handle(self, *args, **options):
start = datetime.now() # Measure the time it takes to run the script.
day = options['date']
if not day:
day = (datetime.now() - timedelta(days=1)).strftime('%Y-%m-%d')
sep = options['separator']
if options['stats_source'] == 's3':
filepath = 's3://' + '/'.join([settings.AWS_STATS_S3_BUCKET,
settings.AWS_STATS_S3_PREFIX,
'download_counts',
day, '000000_0'])
elif options['stats_source'] == 'file':
folder = options['folder_name']
folder = path.join(settings.TMP_PATH, folder, day)
filepath = path.join(folder, 'download_counts.hive')
# Make sure we're not trying to update with mismatched data.
if get_date(filepath, sep) != day:
raise CommandError('%s file contains data for another day' %
filepath)
# First, make sure we don't have any existing counts for the same day,
# or it would just increment again the same data.
DownloadCount.objects.filter(date=day).delete()
# Memoize the files to addon relations and the DownloadCounts.
download_counts = {}
# Perf: preload all the files and slugs once and for all.
# This builds two dicts:
# - One where each key (the file_id we get from the hive query) has
# the addon_id as value.
# - One where each key (the add-on slug) has the add-on_id as value.
files_to_addon = dict(File.objects.values_list('id',
'version__addon_id'))
slugs_to_addon = dict(
Addon.unfiltered.exclude(status=amo.STATUS_NULL)
.values_list('slug', 'id'))
# Only accept valid sources, which are constants. The source must
# either be exactly one of the "full" valid sources, or prefixed by one
# of the "prefix" valid sources.
fulls = amo.DOWNLOAD_SOURCES_FULL
prefixes = amo.DOWNLOAD_SOURCES_PREFIX
count_file = get_stats_data(filepath)
for index, line in enumerate(count_file):
if index and (index % 1000000) == 0:
log.info('Processed %s lines' % index)
splitted = line[:-1].split(sep)
if len(splitted) != 4:
log.debug('Badly formatted row: %s' % line)
continue
day, counter, id_or_slug, src = splitted
try:
# Clean up data.
id_or_slug = id_or_slug.strip()
counter = int(counter)
except ValueError:
# Ignore completely invalid data.
continue
if id_or_slug.strip().isdigit():
# If it's a digit, then it should be a file id.
try:
id_or_slug = int(id_or_slug)
except ValueError:
continue
# Does this file exist?
if id_or_slug in files_to_addon:
addon_id = files_to_addon[id_or_slug]
# Maybe it's an add-on ?
elif id_or_slug in files_to_addon.values():
addon_id = id_or_slug
else:
# It's an integer we don't recognize, ignore the row.
continue
else:
# It's probably a slug.
if id_or_slug in slugs_to_addon:
addon_id = slugs_to_addon[id_or_slug]
else:
# We've exhausted all possibilities, ignore this row.
continue
if not is_valid_source(src, fulls=fulls, prefixes=prefixes):
continue
# Memoize the DownloadCount.
if addon_id in download_counts:
dc = download_counts[addon_id]
else:
dc = DownloadCount(date=day, addon_id=addon_id, count=0)
download_counts[addon_id] = dc
# We can now fill the DownloadCount object.
dc.count += counter
dc.sources = update_inc(dc.sources, src, counter)
# Close all old connections in this thread before we start creating the
# `DownloadCount` values.
# https://github.com/mozilla/addons-server/issues/6886
# If the calculation above takes too long it might happen that we run
# into `wait_timeout` problems and django doesn't reconnect properly
# (potentially because of misconfiguration).
# Django will re-connect properly after it notices that all
# connections are closed.
close_old_connections()
# Create in bulk: this is much faster.
DownloadCount.objects.bulk_create(download_counts.values(), 100)
log.info('Processed a total of %s lines' % (index + 1))
log.debug('Total processing time: %s' % (datetime.now() - start))
if options['stats_source'] == 'file':
# Clean up file.
log.debug('Deleting {path}'.format(path=filepath))
unlink(filepath)
| {
"content_hash": "8293625c7ab3336bf3f44dc41094a2b5",
"timestamp": "",
"source": "github",
"line_count": 209,
"max_line_length": 79,
"avg_line_length": 37.952153110047846,
"alnum_prop": 0.5753908219868885,
"repo_name": "kumar303/addons-server",
"id": "842b468510ce1f499fc4be77bfee639b0d2f9a62",
"size": "7932",
"binary": false,
"copies": "6",
"ref": "refs/heads/master",
"path": "src/olympia/stats/management/commands/download_counts_from_file.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "803737"
},
{
"name": "Dockerfile",
"bytes": "3059"
},
{
"name": "HTML",
"bytes": "422013"
},
{
"name": "JavaScript",
"bytes": "1048955"
},
{
"name": "Makefile",
"bytes": "384"
},
{
"name": "PLSQL",
"bytes": "316"
},
{
"name": "PLpgSQL",
"bytes": "9352"
},
{
"name": "Python",
"bytes": "5160043"
},
{
"name": "SQLPL",
"bytes": "645"
},
{
"name": "Shell",
"bytes": "7787"
},
{
"name": "Smarty",
"bytes": "1356"
}
],
"symlink_target": ""
} |
"""Support for SwitchBot sensors."""
from __future__ import annotations
from homeassistant.components.sensor import (
SensorDeviceClass,
SensorEntity,
SensorEntityDescription,
)
from homeassistant.config_entries import ConfigEntry
from homeassistant.const import (
CONF_MAC,
CONF_NAME,
PERCENTAGE,
SIGNAL_STRENGTH_DECIBELS_MILLIWATT,
)
from homeassistant.core import HomeAssistant
from homeassistant.helpers.entity import EntityCategory
from homeassistant.helpers.entity_platform import AddEntitiesCallback
from .const import DATA_COORDINATOR, DOMAIN
from .coordinator import SwitchbotDataUpdateCoordinator
from .entity import SwitchbotEntity
PARALLEL_UPDATES = 1
SENSOR_TYPES: dict[str, SensorEntityDescription] = {
"rssi": SensorEntityDescription(
key="rssi",
native_unit_of_measurement=SIGNAL_STRENGTH_DECIBELS_MILLIWATT,
device_class=SensorDeviceClass.SIGNAL_STRENGTH,
entity_registry_enabled_default=False,
entity_category=EntityCategory.DIAGNOSTIC,
),
"battery": SensorEntityDescription(
key="battery",
native_unit_of_measurement=PERCENTAGE,
device_class=SensorDeviceClass.BATTERY,
entity_category=EntityCategory.DIAGNOSTIC,
),
"lightLevel": SensorEntityDescription(
key="lightLevel",
native_unit_of_measurement="Level",
device_class=SensorDeviceClass.ILLUMINANCE,
),
}
async def async_setup_entry(
hass: HomeAssistant, entry: ConfigEntry, async_add_entities: AddEntitiesCallback
) -> None:
"""Set up Switchbot sensor based on a config entry."""
coordinator: SwitchbotDataUpdateCoordinator = hass.data[DOMAIN][entry.entry_id][
DATA_COORDINATOR
]
if not coordinator.data[entry.unique_id].get("data"):
return
async_add_entities(
[
SwitchBotSensor(
coordinator,
entry.unique_id,
sensor,
entry.data[CONF_MAC],
entry.data[CONF_NAME],
)
for sensor in coordinator.data[entry.unique_id]["data"]
if sensor in SENSOR_TYPES
]
)
class SwitchBotSensor(SwitchbotEntity, SensorEntity):
"""Representation of a Switchbot sensor."""
def __init__(
self,
coordinator: SwitchbotDataUpdateCoordinator,
idx: str | None,
sensor: str,
mac: str,
switchbot_name: str,
) -> None:
"""Initialize the Switchbot sensor."""
super().__init__(coordinator, idx, mac, name=switchbot_name)
self._sensor = sensor
self._attr_unique_id = f"{idx}-{sensor}"
self._attr_name = f"{switchbot_name} {sensor.title()}"
self.entity_description = SENSOR_TYPES[sensor]
@property
    def native_value(self) -> str | int:
"""Return the state of the sensor."""
return self.data["data"][self._sensor]
| {
"content_hash": "c141ed96f61c00edd71ba19c00e53b41",
"timestamp": "",
"source": "github",
"line_count": 95,
"max_line_length": 84,
"avg_line_length": 30.74736842105263,
"alnum_prop": 0.6603902773022937,
"repo_name": "toddeye/home-assistant",
"id": "1ee0276b7ee4ceeccac865c7fefd3e68f7e54df3",
"size": "2921",
"binary": false,
"copies": "1",
"ref": "refs/heads/dev",
"path": "homeassistant/components/switchbot/sensor.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "3005"
},
{
"name": "PLSQL",
"bytes": "840"
},
{
"name": "Python",
"bytes": "47414832"
},
{
"name": "Shell",
"bytes": "6252"
}
],
"symlink_target": ""
} |
import serial
import time
ser = serial.Serial('/dev/serial0', 9600, timeout=0)
time.sleep(5)
print('ready')
while True:
print(ser.readline())
time.sleep(1)
| {
"content_hash": "aac6f06b1c54d8a601e8a7d04d10b49e",
"timestamp": "",
"source": "github",
"line_count": 11,
"max_line_length": 52,
"avg_line_length": 15.181818181818182,
"alnum_prop": 0.6826347305389222,
"repo_name": "embeddedemily/ember-agaricglow",
"id": "364049df20c0788c50e737a4ea6a80f535ad6647",
"size": "167",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pi-server/gps.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Arduino",
"bytes": "2417"
},
{
"name": "CSS",
"bytes": "11608"
},
{
"name": "HTML",
"bytes": "4024"
},
{
"name": "JavaScript",
"bytes": "210680"
},
{
"name": "Python",
"bytes": "1494"
}
],
"symlink_target": ""
} |
import sys, os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'CrossCat'
copyright = (u'2010-2016, MIT Probabilistic Computing Project '
u'+ Univ. of Louisville')
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.1'
# The full version, including alpha/beta/rc tags.
release = '0.1'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'rtd'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
html_theme_path = ['themes']
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'CrossCatdoc'
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
'classoptions': ',openany,oneside',
'babel' : '\\usepackage[english]{babel}'
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'CrossCat.tex', u'CrossCat Documentation',
u'MIT Probabilistic Computing Project + Univ. of Louisville', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'bayesdb', u'CrossCat Documentation',
[u'MIT Probabilistic Computing Project + Univ. of Louisville'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'CrossCat', u'CrossCat Documentation',
u'MIT Probabilistic Computing Project + Univ. of Louisville', 'CrossCat', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
| {
"content_hash": "3efded3ec1b1337de006bcf6f4c9f0ae",
"timestamp": "",
"source": "github",
"line_count": 238,
"max_line_length": 112,
"avg_line_length": 32.88655462184874,
"alnum_prop": 0.702823559473617,
"repo_name": "probcomp/crosscat",
"id": "36340c676de2d97dbab98fac944d59979482663f",
"size": "9051",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "docs/sphinx/conf.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "1948"
},
{
"name": "C++",
"bytes": "377201"
},
{
"name": "CSS",
"bytes": "477"
},
{
"name": "HTML",
"bytes": "7386"
},
{
"name": "JavaScript",
"bytes": "18"
},
{
"name": "Makefile",
"bytes": "1989"
},
{
"name": "Matlab",
"bytes": "61001"
},
{
"name": "Python",
"bytes": "456113"
},
{
"name": "Ruby",
"bytes": "5045"
},
{
"name": "Shell",
"bytes": "2595"
}
],
"symlink_target": ""
} |
from django.http import HttpResponse
from gm_pr import settings, proj_repo
from bot import tasks, slackauth
@slackauth.isFromSlack
def index(request):
project, repos = proj_repo.proj_repo(request)
if repos != None:
tasks.slack(settings.TOP_LEVEL_URL,
settings.ORG,
"%s?project=%s" % (settings.WEB_URL, project),
repos,
settings.SLACK_URL,
"#%s" % project)
return HttpResponse("Octocat thank you for your business\n")
else:
return HttpResponse("No projects found\n", status=404)
| {
"content_hash": "3cf1d67f582dc845a21d2c69eeb6187e",
"timestamp": "",
"source": "github",
"line_count": 17,
"max_line_length": 68,
"avg_line_length": 36.1764705882353,
"alnum_prop": 0.5934959349593496,
"repo_name": "Genymobile/gm_pr",
"id": "7509ae5d77b20a441633a57e6fec90f638a22259",
"size": "1192",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "bot/views.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "1450"
},
{
"name": "Dockerfile",
"bytes": "1413"
},
{
"name": "HTML",
"bytes": "8891"
},
{
"name": "Python",
"bytes": "61706"
}
],
"symlink_target": ""
} |
import math
import paddle.v2 as paddle
def network_conf(hidden_size, embed_size, dict_size, is_train=True):
first_word = paddle.layer.data(
name='firstw', type=paddle.data_type.integer_value(dict_size))
second_word = paddle.layer.data(
name='secondw', type=paddle.data_type.integer_value(dict_size))
third_word = paddle.layer.data(
name='thirdw', type=paddle.data_type.integer_value(dict_size))
fourth_word = paddle.layer.data(
name='fourthw', type=paddle.data_type.integer_value(dict_size))
target_word = paddle.layer.data(
name='fifthw', type=paddle.data_type.integer_value(dict_size))
embed_param_attr = paddle.attr.Param(
name="_proj", initial_std=0.001, learning_rate=1, l2_rate=0)
embed_first_word = paddle.layer.embedding(
input=first_word, size=embed_size, param_attr=embed_param_attr)
embed_second_word = paddle.layer.embedding(
input=second_word, size=embed_size, param_attr=embed_param_attr)
embed_third_word = paddle.layer.embedding(
input=third_word, size=embed_size, param_attr=embed_param_attr)
embed_fourth_word = paddle.layer.embedding(
input=fourth_word, size=embed_size, param_attr=embed_param_attr)
embed_context = paddle.layer.concat(input=[
embed_first_word, embed_second_word, embed_third_word, embed_fourth_word
])
hidden_layer = paddle.layer.fc(
input=embed_context,
size=hidden_size,
act=paddle.activation.Sigmoid(),
layer_attr=paddle.attr.Extra(drop_rate=0.5),
bias_attr=paddle.attr.Param(learning_rate=2),
param_attr=paddle.attr.Param(
initial_std=1. / math.sqrt(embed_size * 8), learning_rate=1))
if is_train == True:
cost = paddle.layer.hsigmoid(
input=hidden_layer,
label=target_word,
num_classes=dict_size,
param_attr=paddle.attr.Param(name='sigmoid_w'),
bias_attr=paddle.attr.Param(name='sigmoid_b'))
return cost
else:
with paddle.layer.mixed(
size=dict_size - 1,
act=paddle.activation.Sigmoid(),
bias_attr=paddle.attr.Param(name='sigmoid_b')) as prediction:
prediction += paddle.layer.trans_full_matrix_projection(
input=hidden_layer,
param_attr=paddle.attr.Param(name='sigmoid_w'))
return prediction
| {
"content_hash": "1c9f7b38a50fd3b3299a5f08700f85bb",
"timestamp": "",
"source": "github",
"line_count": 57,
"max_line_length": 80,
"avg_line_length": 42.68421052631579,
"alnum_prop": 0.6407727085902178,
"repo_name": "Canpio/models",
"id": "be6b7462a1487e906278fa2682d65add256aaa2d",
"size": "2480",
"binary": false,
"copies": "4",
"ref": "refs/heads/develop",
"path": "word_embedding/hsigmoid_conf.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "97166"
},
{
"name": "Python",
"bytes": "105496"
},
{
"name": "Shell",
"bytes": "579"
}
],
"symlink_target": ""
} |
"""Tests for the pvpc_hourly_pricing sensor component."""
from datetime import datetime, timedelta
import logging
from pytz import timezone
from homeassistant.components.pvpc_hourly_pricing import ATTR_TARIFF, DOMAIN
from homeassistant.const import CONF_NAME
from homeassistant.core import ATTR_NOW, EVENT_TIME_CHANGED
from homeassistant.setup import async_setup_component
from .conftest import check_valid_state
from tests.async_mock import patch
from tests.common import date_util
from tests.test_util.aiohttp import AiohttpClientMocker
async def _process_time_step(
hass, mock_data, key_state=None, value=None, tariff="discrimination", delta_min=60
):
state = hass.states.get("sensor.test_dst")
check_valid_state(state, tariff=tariff, value=value, key_attr=key_state)
mock_data["return_time"] += timedelta(minutes=delta_min)
hass.bus.async_fire(EVENT_TIME_CHANGED, {ATTR_NOW: mock_data["return_time"]})
await hass.async_block_till_done()
return state
async def test_sensor_availability(
hass, caplog, legacy_patchable_time, pvpc_aioclient_mock: AiohttpClientMocker
):
"""Test sensor availability and handling of cloud access."""
hass.config.time_zone = timezone("Europe/Madrid")
config = {DOMAIN: [{CONF_NAME: "test_dst", ATTR_TARIFF: "discrimination"}]}
mock_data = {"return_time": datetime(2019, 10, 27, 20, 0, 0, tzinfo=date_util.UTC)}
def mock_now():
return mock_data["return_time"]
with patch("homeassistant.util.dt.utcnow", new=mock_now):
assert await async_setup_component(hass, DOMAIN, config)
await hass.async_block_till_done()
caplog.clear()
assert pvpc_aioclient_mock.call_count == 2
await _process_time_step(hass, mock_data, "price_21h", 0.13896)
await _process_time_step(hass, mock_data, "price_22h", 0.06893)
assert pvpc_aioclient_mock.call_count == 4
await _process_time_step(hass, mock_data, "price_23h", 0.06935)
assert pvpc_aioclient_mock.call_count == 5
# sensor has no more prices, state is "unavailable" from now on
await _process_time_step(hass, mock_data, value="unavailable")
await _process_time_step(hass, mock_data, value="unavailable")
num_errors = sum(
1 for x in caplog.get_records("call") if x.levelno == logging.ERROR
)
num_warnings = sum(
1 for x in caplog.get_records("call") if x.levelno == logging.WARNING
)
assert num_warnings == 1
assert num_errors == 0
assert pvpc_aioclient_mock.call_count == 9
# check that it is silent until it becomes available again
caplog.clear()
with caplog.at_level(logging.WARNING):
# silent mode
for _ in range(21):
await _process_time_step(hass, mock_data, value="unavailable")
assert pvpc_aioclient_mock.call_count == 30
assert len(caplog.messages) == 0
# warning about data access recovered
await _process_time_step(hass, mock_data, value="unavailable")
assert pvpc_aioclient_mock.call_count == 31
assert len(caplog.messages) == 1
assert caplog.records[0].levelno == logging.WARNING
# working ok again
await _process_time_step(hass, mock_data, "price_00h", value=0.06821)
assert pvpc_aioclient_mock.call_count == 32
await _process_time_step(hass, mock_data, "price_01h", value=0.06627)
assert pvpc_aioclient_mock.call_count == 33
assert len(caplog.messages) == 1
assert caplog.records[0].levelno == logging.WARNING
| {
"content_hash": "9a284e6d029d4dc5090654d12369c8b7",
"timestamp": "",
"source": "github",
"line_count": 88,
"max_line_length": 87,
"avg_line_length": 41.75,
"alnum_prop": 0.6608600979858464,
"repo_name": "pschmitt/home-assistant",
"id": "57861b8b72be37846d7b166c1bd3f638898f8dc6",
"size": "3674",
"binary": false,
"copies": "4",
"ref": "refs/heads/dev",
"path": "tests/components/pvpc_hourly_pricing/test_sensor.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "1522"
},
{
"name": "Python",
"bytes": "24807200"
},
{
"name": "Shell",
"bytes": "4342"
}
],
"symlink_target": ""
} |
from django.db import transaction
import pytest
@pytest.fixture(scope='session', autouse=True)
def patch_on_commit_hook():
old_on_commit = transaction.on_commit
transaction.on_commit = lambda func, *args, **kwargs: func()
yield
transaction.on_commit = old_on_commit
@pytest.fixture(autouse=True)
def caches():
from django.conf import settings
from django.core.cache import caches
yield caches
for alias in settings.CACHES:
caches[alias]._cache.get_client().flushdb()
| {
"content_hash": "0149b808053bffed2f658a2e0630e8f7",
"timestamp": "",
"source": "github",
"line_count": 21,
"max_line_length": 64,
"avg_line_length": 24.428571428571427,
"alnum_prop": 0.7095516569200779,
"repo_name": "sergeii/swat4stats.com",
"id": "2547fdb7663618fd1c003d5a575bbdf71a0b1b8b",
"size": "513",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "tests/conftest.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "2322"
},
{
"name": "Dockerfile",
"bytes": "3672"
},
{
"name": "HTML",
"bytes": "375515"
},
{
"name": "JavaScript",
"bytes": "3884"
},
{
"name": "Less",
"bytes": "36169"
},
{
"name": "Python",
"bytes": "265160"
},
{
"name": "Smarty",
"bytes": "829"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals, division, absolute_import
from django.contrib import admin
from django.utils.safestring import mark_safe
from pygments import highlight
from pygments.formatters.html import HtmlFormatter
from pygments.lexers.data import JsonLexer
from . import models
class _PrettyResponseMixin(object):
def prettify_json(self, content):
formatter = HtmlFormatter(style='colorful')
prettified = highlight(content, JsonLexer(), formatter)
return '<style>{}</style>{}'.format(formatter.get_style_defs(), prettified)
def get_pretty_stripe_response(self, obj):
return mark_safe('<br>{}'.format(self.prettify_json(obj.stripe_response)))
@admin.register(models.StripeCustomer)
class StripeCustomerAdmin(_PrettyResponseMixin, admin.ModelAdmin):
list_display = ['__unicode__', 'user', 'timestamp_created']
search_fields = ['stripe_id', 'user']
readonly_fields = ['get_pretty_stripe_response']
fields = ['stripe_id', 'user', 'get_pretty_stripe_response']
@admin.register(models.StripeCharge)
class StripeChargeAdmin(_PrettyResponseMixin, admin.ModelAdmin):
list_display = ['__unicode__', 'stripe_customer', 'stripe_card', 'amount', 'currency', 'timestamp_created']
readonly_fields = ['get_pretty_stripe_response']
list_filter = ['currency']
fields = ['stripe_id', 'stripe_customer', 'stripe_card', 'amount', 'currency', 'get_pretty_stripe_response']
@admin.register(models.StripeCard)
class StripeCardAdmin(_PrettyResponseMixin, admin.ModelAdmin):
list_display = ['__unicode__', 'brand', 'country', 'funding', 'last4', 'exp_month', 'exp_year',
'timestamp_created']
readonly_fields = ['get_pretty_stripe_response']
list_filter = ['brand', 'country', 'funding']
fields = ['stripe_id', 'stripe_customer', 'last4', 'exp_year', 'exp_month', 'brand', 'country',
'funding', 'get_pretty_stripe_response']
| {
"content_hash": "1ea16f01ca96a0b2ee7b269f3efd35e0",
"timestamp": "",
"source": "github",
"line_count": 46,
"max_line_length": 112,
"avg_line_length": 42.30434782608695,
"alnum_prop": 0.6952723535457348,
"repo_name": "silverfix/django-ribbon",
"id": "c92501551bf0ac89834917b499af7069f91eb043",
"size": "1971",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ribbon/admin.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "HTML",
"bytes": "9385"
},
{
"name": "Python",
"bytes": "13830"
}
],
"symlink_target": ""
} |
from django.contrib.sessions.base_session import AbstractBaseSession, BaseSessionManager
class SessionManager(BaseSessionManager):
use_in_migrations = True
class Session(AbstractBaseSession):
"""
Django provides full support for anonymous sessions. The session
framework lets you store and retrieve arbitrary data on a
per-site-visitor basis. It stores data on the server side and
abstracts the sending and receiving of cookies. Cookies contain a
session ID -- not the data itself.
The Django sessions framework is entirely cookie-based. It does
not fall back to putting session IDs in URLs. This is an intentional
design decision. Not only does that behavior make URLs ugly, it makes
your site vulnerable to session-ID theft via the "Referer" header.
For complete documentation on using Sessions in your code, consult
the sessions documentation that is shipped with Django (also available
on the Django web site).
"""
objects = SessionManager()
@classmethod
def get_session_store_class(cls):
from django.contrib.sessions.backends.db import SessionStore
return SessionStore
class Meta(AbstractBaseSession.Meta):
db_table = "django_session"
| {
"content_hash": "4dd5e62c0ee4acbae19f8c9567def9ff",
"timestamp": "",
"source": "github",
"line_count": 35,
"max_line_length": 88,
"avg_line_length": 35.714285714285715,
"alnum_prop": 0.744,
"repo_name": "auvipy/django",
"id": "e786ab4eac4262a5e4b628e3e39dad4e224a546a",
"size": "1250",
"binary": false,
"copies": "17",
"ref": "refs/heads/main",
"path": "django/contrib/sessions/models.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "87610"
},
{
"name": "HTML",
"bytes": "236871"
},
{
"name": "JavaScript",
"bytes": "146241"
},
{
"name": "Makefile",
"bytes": "125"
},
{
"name": "Procfile",
"bytes": "47"
},
{
"name": "Python",
"bytes": "16014747"
},
{
"name": "Shell",
"bytes": "809"
},
{
"name": "Smarty",
"bytes": "392"
}
],
"symlink_target": ""
} |
from django.utils.translation import ugettext_lazy as _
from cms.plugin_base import CMSPluginBase
from cms.plugin_pool import plugin_pool
from .models import MarkitUpPluginModel
class MarkItUpPlugin(CMSPluginBase):
name = _(u'MarkItUp')
model = MarkitUpPluginModel
render_template = 'djangocms_markitup/markitup.html'
plugin_pool.register_plugin(MarkItUpPlugin)
| {
"content_hash": "3bff0862242f3ad389d9b88f6ddd5d77",
"timestamp": "",
"source": "github",
"line_count": 14,
"max_line_length": 56,
"avg_line_length": 27.071428571428573,
"alnum_prop": 0.7915567282321899,
"repo_name": "nephila/djangocms-markitup",
"id": "b9b3be22f2f05b883a1a969a9b03f6d452fc305f",
"size": "403",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "djangocms_markitup/cms_plugins.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Makefile",
"bytes": "7783"
},
{
"name": "Python",
"bytes": "12438"
},
{
"name": "Shell",
"bytes": "6531"
}
],
"symlink_target": ""
} |
from swgpy.object import *
def create(kernel):
result = Creature()
result.template = "object/mobile/shared_sullustan_male.iff"
result.attribute_template_id = 9
result.stfName("npc_name","sullustan_base_male")
#### BEGIN MODIFICATIONS ####
#### END MODIFICATIONS ####
return result | {
"content_hash": "caa942947b89f64b2cdff6efff93e56b",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 60,
"avg_line_length": 22.923076923076923,
"alnum_prop": 0.6946308724832215,
"repo_name": "anhstudios/swganh",
"id": "2c28682476b98eff916dba95004c32f77ff6109f",
"size": "443",
"binary": false,
"copies": "2",
"ref": "refs/heads/develop",
"path": "data/scripts/templates/object/mobile/shared_sullustan_male.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "11887"
},
{
"name": "C",
"bytes": "7699"
},
{
"name": "C++",
"bytes": "2357839"
},
{
"name": "CMake",
"bytes": "41264"
},
{
"name": "PLSQL",
"bytes": "42065"
},
{
"name": "Python",
"bytes": "7503510"
},
{
"name": "SQLPL",
"bytes": "42770"
}
],
"symlink_target": ""
} |
'''perform automatic newline conversion (DEPRECATED)
Deprecation: The win32text extension requires each user to configure
the extension again and again for each clone since the configuration
is not copied when cloning.
We have therefore made the ``eol`` as an alternative. The ``eol``
uses a version controlled file for its configuration and each clone
will therefore use the right settings from the start.
To perform automatic newline conversion, use::
[extensions]
win32text =
[encode]
** = cleverencode:
# or ** = macencode:
[decode]
** = cleverdecode:
# or ** = macdecode:
If not doing conversion, to make sure you do not commit CRLF/CR by accident::
[hooks]
pretxncommit.crlf = python:hgext.win32text.forbidcrlf
# or pretxncommit.cr = python:hgext.win32text.forbidcr
To do the same check on a server to prevent CRLF/CR from being
pushed or pulled::
[hooks]
pretxnchangegroup.crlf = python:hgext.win32text.forbidcrlf
# or pretxnchangegroup.cr = python:hgext.win32text.forbidcr
'''
from __future__ import absolute_import
import re
from mercurial.i18n import _
from mercurial.node import short
from mercurial import (
pycompat,
registrar,
)
from mercurial.utils import stringutil
# Note for extension authors: ONLY specify testedwith = 'ships-with-hg-core' for
# extensions which SHIP WITH MERCURIAL. Non-mainline extensions should
# be specifying the version(s) of Mercurial they are tested with, or
# leave the attribute unspecified.
testedwith = b'ships-with-hg-core'
configtable = {}
configitem = registrar.configitem(configtable)
configitem(
b'win32text',
b'warn',
default=True,
)
# regexp for single LF without CR preceding.
re_single_lf = re.compile(b'(^|[^\r])\n', re.MULTILINE)
newlinestr = {b'\r\n': b'CRLF', b'\r': b'CR'}
filterstr = {b'\r\n': b'clever', b'\r': b'mac'}
def checknewline(s, newline, ui=None, repo=None, filename=None):
# warn if already has 'newline' in repository.
# it might cause unexpected eol conversion.
# see issue 302:
# https://bz.mercurial-scm.org/302
if newline in s and ui and filename and repo:
ui.warn(
_(
b'WARNING: %s already has %s line endings\n'
b'and does not need EOL conversion by the win32text plugin.\n'
b'Before your next commit, please reconsider your '
b'encode/decode settings in \nMercurial.ini or %s.\n'
)
% (filename, newlinestr[newline], repo.vfs.join(b'hgrc'))
)
def dumbdecode(s, cmd, **kwargs):
checknewline(s, b'\r\n', **kwargs)
# replace single LF to CRLF
return re_single_lf.sub(b'\\1\r\n', s)
def dumbencode(s, cmd):
return s.replace(b'\r\n', b'\n')
def macdumbdecode(s, cmd, **kwargs):
checknewline(s, b'\r', **kwargs)
return s.replace(b'\n', b'\r')
def macdumbencode(s, cmd):
return s.replace(b'\r', b'\n')
def cleverdecode(s, cmd, **kwargs):
if not stringutil.binary(s):
return dumbdecode(s, cmd, **kwargs)
return s
def cleverencode(s, cmd):
if not stringutil.binary(s):
return dumbencode(s, cmd)
return s
def macdecode(s, cmd, **kwargs):
if not stringutil.binary(s):
return macdumbdecode(s, cmd, **kwargs)
return s
def macencode(s, cmd):
if not stringutil.binary(s):
return macdumbencode(s, cmd)
return s
_filters = {
b'dumbdecode:': dumbdecode,
b'dumbencode:': dumbencode,
b'cleverdecode:': cleverdecode,
b'cleverencode:': cleverencode,
b'macdumbdecode:': macdumbdecode,
b'macdumbencode:': macdumbencode,
b'macdecode:': macdecode,
b'macencode:': macencode,
}
def forbidnewline(ui, repo, hooktype, node, newline, **kwargs):
halt = False
seen = set()
# we try to walk changesets in reverse order from newest to
# oldest, so that if we see a file multiple times, we take the
# newest version as canonical. this prevents us from blocking a
# changegroup that contains an unacceptable commit followed later
# by a commit that fixes the problem.
tip = repo[b'tip']
for rev in pycompat.xrange(
repo.changelog.tiprev(), repo[node].rev() - 1, -1
):
c = repo[rev]
for f in c.files():
if f in seen or f not in tip or f not in c:
continue
seen.add(f)
data = c[f].data()
if not stringutil.binary(data) and newline in data:
if not halt:
ui.warn(
_(
b'attempt to commit or push text file(s) '
b'using %s line endings\n'
)
% newlinestr[newline]
)
ui.warn(_(b'in %s: %s\n') % (short(c.node()), f))
halt = True
if halt and hooktype == b'pretxnchangegroup':
crlf = newlinestr[newline].lower()
filter = filterstr[newline]
ui.warn(
_(
b'\nTo prevent this mistake in your local repository,\n'
b'add to Mercurial.ini or .hg/hgrc:\n'
b'\n'
b'[hooks]\n'
b'pretxncommit.%s = python:hgext.win32text.forbid%s\n'
b'\n'
b'and also consider adding:\n'
b'\n'
b'[extensions]\n'
b'win32text =\n'
b'[encode]\n'
b'** = %sencode:\n'
b'[decode]\n'
b'** = %sdecode:\n'
)
% (crlf, crlf, filter, filter)
)
return halt
def forbidcrlf(ui, repo, hooktype, node, **kwargs):
return forbidnewline(ui, repo, hooktype, node, b'\r\n', **kwargs)
def forbidcr(ui, repo, hooktype, node, **kwargs):
return forbidnewline(ui, repo, hooktype, node, b'\r', **kwargs)
def reposetup(ui, repo):
if not repo.local():
return
for name, fn in pycompat.iteritems(_filters):
repo.adddatafilter(name, fn)
def extsetup(ui):
# deprecated config: win32text.warn
if ui.configbool(b'win32text', b'warn'):
ui.warn(
_(
b"win32text is deprecated: "
b"https://mercurial-scm.org/wiki/Win32TextExtension\n"
)
)
| {
"content_hash": "f82fb9ea0bb2235666c863242b9f78a9",
"timestamp": "",
"source": "github",
"line_count": 219,
"max_line_length": 80,
"avg_line_length": 28.899543378995435,
"alnum_prop": 0.597882761889714,
"repo_name": "mdaniel/intellij-community",
"id": "30dd8eb7f2d3e5f5184fd80e8cfe93a668fb8afd",
"size": "6621",
"binary": false,
"copies": "9",
"ref": "refs/heads/master",
"path": "plugins/hg4idea/testData/bin/hgext/win32text.py",
"mode": "33188",
"license": "apache-2.0",
"language": [],
"symlink_target": ""
} |
import sensor, image, time
sensor.reset() # Initialize the camera sensor.
sensor.set_pixformat(sensor.RGB565) # or sensor.GRAYSCALE
sensor.set_framesize(sensor.QQVGA) # or sensor.QVGA (or others)
sensor.skip_frames(time = 2000) # Let new settings take effect.
clock = time.clock() # Tracks FPS.
while(True):
clock.tick() # Track elapsed milliseconds between snapshots().
img = sensor.snapshot() # Take a picture and return the image.
    # The first argument is the kernel size. N corresponds to a ((N*2)+1)^2
# kernel size. E.g. 1 == 3x3 kernel, 2 == 5x5 kernel, etc. Note: You
# shouldn't ever need to use a value bigger than 2. The "bias" argument
# lets you select between min and max blending. 0.5 == midpoint filter,
# 0.0 == min filter, and 1.0 == max filter. Note that the min filter
# makes images darker while the max filter makes images lighter.
img.midpoint(1, bias=0.5)
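    # Per the note above, bias=0.0 would make this a 3x3 min filter and
    # bias=1.0 a 3x3 max filter; 0.5 keeps the midpoint behaviour.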
print(clock.fps()) # Note: Your OpenMV Cam runs about half as fast while
# connected to your computer. The FPS should increase once disconnected.
| {
"content_hash": "cd298f3bafd2e4d344690793ceda5e8b",
"timestamp": "",
"source": "github",
"line_count": 22,
"max_line_length": 76,
"avg_line_length": 48.72727272727273,
"alnum_prop": 0.7024253731343284,
"repo_name": "openmv/openmv",
"id": "ee9ab5d94004aa1ba9d5b0c444f60040f9511831",
"size": "1258",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "scripts/examples/02-Image-Processing/01-Image-Filters/midpoint_filter.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Assembly",
"bytes": "569644"
},
{
"name": "C",
"bytes": "100458455"
},
{
"name": "C++",
"bytes": "97780"
},
{
"name": "CMake",
"bytes": "10230"
},
{
"name": "Dockerfile",
"bytes": "885"
},
{
"name": "Makefile",
"bytes": "74311"
},
{
"name": "Python",
"bytes": "876980"
},
{
"name": "Shell",
"bytes": "3220"
}
],
"symlink_target": ""
} |
from cStringIO import StringIO
from decimal import Decimal
from transit.writer import Writer
from transit.reader import Reader
from transit_helpers import DecimalWriteHandler, DecimalReadHandler
# ==============================================================================
def serialize(data):
io = StringIO()
writer = Writer(io, "json")
writer.register(Decimal, DecimalWriteHandler)
writer.write(data)
return io
# ==============================================================================
def deserialize(data):
reader = Reader()
    return reader.read(StringIO(data))
| {
"content_hash": "91924bd7a6f76d5b5cedbfe67cfa063a",
"timestamp": "",
"source": "github",
"line_count": 20,
"max_line_length": 80,
"avg_line_length": 28.85,
"alnum_prop": 0.5701906412478336,
"repo_name": "ryanwersal/python-serialization-comparison",
"id": "0a0a26c342c86cce38bcd97706f7a5032dcd39a2",
"size": "601",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "runners/run_transit_json.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "6519"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('database', '0019_auto_20170821_0842'),
]
operations = [
migrations.AlterField(
model_name='profile',
name='rate',
field=models.DecimalField(blank=True, decimal_places=2, max_digits=12, null=True, verbose_name='Rate (United States Dollar - USD)'),
),
]
| {
"content_hash": "3256447603e206a0272c6ff054aea082",
"timestamp": "",
"source": "github",
"line_count": 18,
"max_line_length": 144,
"avg_line_length": 26.22222222222222,
"alnum_prop": 0.6207627118644068,
"repo_name": "ACLARKNET/aclarknet-database",
"id": "b3af59615a494321be8c31169823e9b815a8d2b4",
"size": "545",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "aclarknet/database/migrations/0020_auto_20170821_0844.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "3681"
},
{
"name": "HTML",
"bytes": "167324"
},
{
"name": "JavaScript",
"bytes": "751469"
},
{
"name": "Makefile",
"bytes": "12395"
},
{
"name": "Python",
"bytes": "166944"
}
],
"symlink_target": ""
} |
"""Property type (attribute) package.
"""
__version__ = "$Revision: #1 $"
#===========================================================================
from .Alias import Alias
from .Boolean import Boolean
from .Enum import Enum
from .Float import Float
from .Instance import Instance
from .Integer import Integer
from .MplColor import MplColor
from .OneOf import OneOf
from .String import String
from .SubStyle import SubStyle
#===========================================================================
#---------------------------------------------------------------------------
| {
"content_hash": "514c8d290844d442974b8d1df5314f9b",
"timestamp": "",
"source": "github",
"line_count": 19,
"max_line_length": 76,
"avg_line_length": 30.736842105263158,
"alnum_prop": 0.4623287671232877,
"repo_name": "nasa/mplStyle",
"id": "bbdde8061c97fea50f424ffcf2d54fa53dc26f6d",
"size": "2386",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "mplStyle/types/property/__init__.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "578438"
}
],
"symlink_target": ""
} |
import cx_Oracle
# the connection must be in PRELIM_AUTH mode
connection = cx_Oracle.connect("/",
mode = cx_Oracle.SYSDBA | cx_Oracle.PRELIM_AUTH)
connection.startup()
# the following statements must be issued in normal SYSDBA mode
connection = cx_Oracle.connect("/", mode = cx_Oracle.SYSDBA)
cursor = connection.cursor()
cursor.execute("alter database mount")
cursor.execute("alter database open")
| {
"content_hash": "8aa04d41bb7f51dfb5ad5171e1fb3994",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 63,
"avg_line_length": 31.53846153846154,
"alnum_prop": 0.7439024390243902,
"repo_name": "cloudera/hue",
"id": "05fdfdcb6c72b9654e9a168dcdc1f077d39c5d72",
"size": "1272",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "desktop/core/ext-py/cx_Oracle-6.4.1/samples/DatabaseStartup.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "ABAP",
"bytes": "962"
},
{
"name": "ActionScript",
"bytes": "1133"
},
{
"name": "Ada",
"bytes": "99"
},
{
"name": "Assembly",
"bytes": "2347"
},
{
"name": "AutoHotkey",
"bytes": "720"
},
{
"name": "BASIC",
"bytes": "2884"
},
{
"name": "Batchfile",
"bytes": "143575"
},
{
"name": "C",
"bytes": "5129166"
},
{
"name": "C#",
"bytes": "83"
},
{
"name": "C++",
"bytes": "718011"
},
{
"name": "COBOL",
"bytes": "4"
},
{
"name": "CSS",
"bytes": "680715"
},
{
"name": "Cirru",
"bytes": "520"
},
{
"name": "Clojure",
"bytes": "794"
},
{
"name": "Closure Templates",
"bytes": "1072"
},
{
"name": "CoffeeScript",
"bytes": "403"
},
{
"name": "ColdFusion",
"bytes": "86"
},
{
"name": "Common Lisp",
"bytes": "632"
},
{
"name": "Cython",
"bytes": "1016963"
},
{
"name": "D",
"bytes": "324"
},
{
"name": "Dart",
"bytes": "489"
},
{
"name": "Dockerfile",
"bytes": "13576"
},
{
"name": "EJS",
"bytes": "752"
},
{
"name": "Eiffel",
"bytes": "375"
},
{
"name": "Elixir",
"bytes": "692"
},
{
"name": "Elm",
"bytes": "487"
},
{
"name": "Emacs Lisp",
"bytes": "411907"
},
{
"name": "Erlang",
"bytes": "487"
},
{
"name": "Forth",
"bytes": "979"
},
{
"name": "FreeMarker",
"bytes": "1017"
},
{
"name": "G-code",
"bytes": "521"
},
{
"name": "GAP",
"bytes": "29873"
},
{
"name": "GLSL",
"bytes": "512"
},
{
"name": "Genshi",
"bytes": "946"
},
{
"name": "Gherkin",
"bytes": "699"
},
{
"name": "Go",
"bytes": "641"
},
{
"name": "Groovy",
"bytes": "1080"
},
{
"name": "HTML",
"bytes": "28328425"
},
{
"name": "Haml",
"bytes": "920"
},
{
"name": "Handlebars",
"bytes": "173"
},
{
"name": "Haskell",
"bytes": "512"
},
{
"name": "Haxe",
"bytes": "447"
},
{
"name": "HiveQL",
"bytes": "43"
},
{
"name": "Io",
"bytes": "140"
},
{
"name": "Java",
"bytes": "457398"
},
{
"name": "JavaScript",
"bytes": "39181239"
},
{
"name": "Jinja",
"bytes": "356"
},
{
"name": "Julia",
"bytes": "210"
},
{
"name": "LSL",
"bytes": "2080"
},
{
"name": "Lean",
"bytes": "213"
},
{
"name": "Less",
"bytes": "396102"
},
{
"name": "Lex",
"bytes": "218764"
},
{
"name": "Liquid",
"bytes": "1883"
},
{
"name": "LiveScript",
"bytes": "5747"
},
{
"name": "Lua",
"bytes": "78382"
},
{
"name": "M4",
"bytes": "1751"
},
{
"name": "MATLAB",
"bytes": "203"
},
{
"name": "Makefile",
"bytes": "1025937"
},
{
"name": "Mako",
"bytes": "3644004"
},
{
"name": "Mask",
"bytes": "597"
},
{
"name": "Myghty",
"bytes": "936"
},
{
"name": "Nix",
"bytes": "2212"
},
{
"name": "OCaml",
"bytes": "539"
},
{
"name": "Objective-C",
"bytes": "2672"
},
{
"name": "OpenSCAD",
"bytes": "333"
},
{
"name": "PHP",
"bytes": "662"
},
{
"name": "PLSQL",
"bytes": "29403"
},
{
"name": "PLpgSQL",
"bytes": "6006"
},
{
"name": "Pascal",
"bytes": "84273"
},
{
"name": "Perl",
"bytes": "4327"
},
{
"name": "PigLatin",
"bytes": "371"
},
{
"name": "PowerShell",
"bytes": "6235"
},
{
"name": "Procfile",
"bytes": "47"
},
{
"name": "Pug",
"bytes": "584"
},
{
"name": "Python",
"bytes": "92881549"
},
{
"name": "R",
"bytes": "2445"
},
{
"name": "Roff",
"bytes": "484108"
},
{
"name": "Ruby",
"bytes": "1098"
},
{
"name": "Rust",
"bytes": "495"
},
{
"name": "SCSS",
"bytes": "78508"
},
{
"name": "Sass",
"bytes": "770"
},
{
"name": "Scala",
"bytes": "1541"
},
{
"name": "Scheme",
"bytes": "559"
},
{
"name": "Shell",
"bytes": "249165"
},
{
"name": "Smarty",
"bytes": "130"
},
{
"name": "SourcePawn",
"bytes": "948"
},
{
"name": "Stylus",
"bytes": "682"
},
{
"name": "Tcl",
"bytes": "899"
},
{
"name": "TeX",
"bytes": "165743"
},
{
"name": "Thrift",
"bytes": "341963"
},
{
"name": "Twig",
"bytes": "761"
},
{
"name": "TypeScript",
"bytes": "1241396"
},
{
"name": "VBScript",
"bytes": "938"
},
{
"name": "VHDL",
"bytes": "830"
},
{
"name": "Vala",
"bytes": "485"
},
{
"name": "Verilog",
"bytes": "274"
},
{
"name": "Vim Snippet",
"bytes": "226931"
},
{
"name": "Vue",
"bytes": "350385"
},
{
"name": "XQuery",
"bytes": "114"
},
{
"name": "XSLT",
"bytes": "522199"
},
{
"name": "Yacc",
"bytes": "1070437"
},
{
"name": "jq",
"bytes": "4"
}
],
"symlink_target": ""
} |
"""Functions for fitting the model, etc.
"""
#
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import numpy as np
def initial_period(N=100, logTmin=2, logTmax=5):
"""Return a vector of initial frequencies, evenly spaced in log *period*.
Parameters
----------
N : :class:`int`, optional
Number of frequencies, default 100.
logTmin : :class:`float`, optional
        Minimum log period, default 2 (100 days).
logTmax : :class:`float`, optional
Maximum log period, default 5 (100,000 days).
Returns
-------
:class:`~numpy.ndarray`
Array containing orbital angular frequencies.
"""
Ts = np.logspace(logTmin, logTmax, N)
return 2.0*np.pi/Ts
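# Worked example (follows directly from the definition above, shown only for
# illustration): initial_period(N=3, logTmin=2, logTmax=4) spans the periods
# [100, 1000, 10000] days and returns the angular frequencies 2*pi/T for each.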
def fitter(data, options):
"""Runs :func:`~scipy.optimize.minimize` on a set of initial guesses.
Parameters
----------
data : :class:`dict`
A dictionary containing the radial velocity data.
options : :class:`~argparse.Namespace`
Command-line options.
Returns
-------
:class:`list`
A set of fits, one for each initial period guess.
"""
from scipy.optimize import minimize
from .model import obj, dobj, d2obj
fitter_options = {'disp': False}
if options.method == 'TNC':
fitter_options['maxiter'] = 10000
w0 = initial_period()
fits = list()
fitter_args = (data.vhelio, data.mjd, data.vrelerr, options.Q)
fitter_bounds = ((None, None), (None, None), (None, None),
(2.0*np.pi*1.0e-6, 2.0*np.pi))
for k in range(len(w0)):
p0 = np.array([data.vhelio_avg, data.vscatter, 0, w0[k]])
fit = minimize(obj, p0, args=fitter_args, method=options.method,
jac=dobj, # hess=d2obj,
bounds=fitter_bounds, options=fitter_options)
fits.append(fit)
return fits
| {
"content_hash": "f9222d463ca031dec96c612a4c83a230",
"timestamp": "",
"source": "github",
"line_count": 61,
"max_line_length": 77,
"avg_line_length": 31.39344262295082,
"alnum_prop": 0.5953002610966057,
"repo_name": "weaverba137/rv",
"id": "208fe6a95febc2f13f59f4c621588746805d5bdf",
"size": "2003",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "rv/fitter.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "673"
},
{
"name": "HTML",
"bytes": "22952"
},
{
"name": "JavaScript",
"bytes": "37630"
},
{
"name": "Python",
"bytes": "112568"
}
],
"symlink_target": ""
} |
__all__ = [
"AR",
"ARDL",
"ARIMA",
"ArmaProcess",
"AutoReg",
"DynamicFactor",
"DynamicFactorMQ",
"ETSModel",
"ExponentialSmoothing",
"Holt",
"MarkovAutoregression",
"MarkovRegression",
"SARIMAX",
"STL",
"STLForecast",
"SVAR",
"SimpleExpSmoothing",
"UECM",
"UnobservedComponents",
"VAR",
"VARMAX",
"VECM",
"acf",
"acovf",
"add_lag",
"add_trend",
"adfuller",
"range_unit_root_test",
"arima",
"arma_generate_sample",
"arma_order_select_ic",
"ardl_select_order",
"bds",
"bk_filter",
"breakvar_heteroskedasticity_test",
"ccf",
"ccovf",
"cf_filter",
"coint",
"datetools",
"detrend",
"filters",
"graphics",
"hp_filter",
"innovations",
"interp",
"kpss",
"lagmat",
"lagmat2ds",
"pacf",
"pacf_ols",
"pacf_yw",
"q_stat",
"seasonal_decompose",
"statespace",
"stattools",
"tsatools",
"var",
"x13_arima_analysis",
"x13_arima_select_order",
"zivot_andrews"
]
from . import interp, stattools, tsatools, vector_ar as var
from ..graphics import tsaplots as graphics
from .ar_model import AR, AutoReg
from .ardl import ARDL, UECM, ardl_select_order
from .arima import api as arima
from .arima.model import ARIMA
from .arima_process import ArmaProcess, arma_generate_sample
from .base import datetools
from .exponential_smoothing.ets import ETSModel
from .filters import api as filters, bk_filter, cf_filter, hp_filter
from .forecasting.stl import STLForecast
from .holtwinters import ExponentialSmoothing, Holt, SimpleExpSmoothing
from .innovations import api as innovations
from .regime_switching.markov_autoregression import MarkovAutoregression
from .regime_switching.markov_regression import MarkovRegression
from .seasonal import STL, seasonal_decompose
from .statespace import api as statespace
from .statespace.dynamic_factor import DynamicFactor
from .statespace.dynamic_factor_mq import DynamicFactorMQ
from .statespace.sarimax import SARIMAX
from .statespace.structural import UnobservedComponents
from .statespace.varmax import VARMAX
from .stattools import (
acf,
acovf,
adfuller,
arma_order_select_ic,
bds,
breakvar_heteroskedasticity_test,
ccf,
ccovf,
coint,
kpss,
pacf,
pacf_ols,
pacf_yw,
q_stat,
range_unit_root_test,
zivot_andrews
)
from .tsatools import add_lag, add_trend, detrend, lagmat, lagmat2ds
from .vector_ar.svar_model import SVAR
from .vector_ar.var_model import VAR
from .vector_ar.vecm import VECM
from .x13 import x13_arima_analysis, x13_arima_select_order
| {
"content_hash": "3430954a83a6e92cf49d707797d483bc",
"timestamp": "",
"source": "github",
"line_count": 109,
"max_line_length": 72,
"avg_line_length": 24.568807339449542,
"alnum_prop": 0.6755041075429425,
"repo_name": "bashtage/statsmodels",
"id": "a802379c6f018ba552a7f9c989253c364f15f98e",
"size": "2678",
"binary": false,
"copies": "3",
"ref": "refs/heads/main",
"path": "statsmodels/tsa/api.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "AGS Script",
"bytes": "457842"
},
{
"name": "Assembly",
"bytes": "10035"
},
{
"name": "Batchfile",
"bytes": "625"
},
{
"name": "C",
"bytes": "381"
},
{
"name": "Cython",
"bytes": "225838"
},
{
"name": "Fortran",
"bytes": "16671"
},
{
"name": "HTML",
"bytes": "148470"
},
{
"name": "MATLAB",
"bytes": "100525"
},
{
"name": "Python",
"bytes": "14433387"
},
{
"name": "R",
"bytes": "106569"
},
{
"name": "Shell",
"bytes": "25329"
},
{
"name": "Stata",
"bytes": "50129"
}
],
"symlink_target": ""
} |
"""
This shows an example of the "fivethirtyeight" styling, which
tries to replicate the styles from FiveThirtyEight.com.
"""
from matplotlib import pyplot as plt
import numpy as np
x = np.linspace(0, 10)
with plt.style.context('fivethirtyeight'):
plt.plot(x, np.sin(x) + x + np.random.randn(50))
plt.plot(x, np.sin(x) + 0.5 * x + np.random.randn(50))
plt.plot(x, np.sin(x) + 2 * x + np.random.randn(50))
plt.show()
| {
"content_hash": "acf9bdf28cc027dc6bb1d6c7e7ee2836",
"timestamp": "",
"source": "github",
"line_count": 18,
"max_line_length": 62,
"avg_line_length": 24.166666666666668,
"alnum_prop": 0.6689655172413793,
"repo_name": "yavalvas/yav_com",
"id": "4bed0a5d2ddebd7ad35afbad99456c8356bc4438",
"size": "435",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "build/matplotlib/lib/mpl_examples/style_sheets/plot_fivethirtyeight.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "85377"
},
{
"name": "C++",
"bytes": "568744"
},
{
"name": "CSS",
"bytes": "47585"
},
{
"name": "Erlang",
"bytes": "7112"
},
{
"name": "HTML",
"bytes": "14865"
},
{
"name": "JavaScript",
"bytes": "359937"
},
{
"name": "Objective-C",
"bytes": "188937"
},
{
"name": "Perl",
"bytes": "229498"
},
{
"name": "Python",
"bytes": "7684946"
},
{
"name": "Shell",
"bytes": "1805"
}
],
"symlink_target": ""
} |
"""
MiniVC - a small and simple mvc library for python. it doesn't do much. that's a feature.
mvc, so if nothing else, we can throw the laundry that is our code into three different piles.
inspired by puremvc python by Toby de Havilland <[email protected]>
"""
# Forked from nathants
from collections import namedtuple
Note = namedtuple("Note", "name, body, uid")
class Controller(object):
"""singleton. manages controller objects"""
_shared_state = {}
_command_map = {}
def __init__(self):
self.__dict__ = self._shared_state
self.view = View()
def handle_note(self, note):
cmd = self._command_map[note.name]
cmd(Facade(), note)
def register_command(self, name, cmd):
observer = {"func": self.handle_note, "obj": self}
self.view.register_observer(name, observer)
self._command_map[name] = cmd
def remove_command(self, name):
if name in self._command_map:
self.view.remove_observer(name, self)
del self._command_map[name]
class Model(object):
"""singleton. manages model objects"""
_shared_state = {}
_proxy_map = {}
def __init__(self):
self.__dict__ = self._shared_state
def register_proxy(self, proxy):
self._proxy_map[proxy.name] = proxy
proxy.on_register()
return proxy
def get_proxy(self, name):
proxy = self._proxy_map.get(name, None)
if not proxy:
raise LookupError("No Proxy found for name: %s" % name)
return proxy
def remove_proxy(self, name):
proxy = self._proxy_map.get(name, None)
if proxy:
del self._proxy_map[name]
proxy.on_remove()
class View(object):
"""singleton. manages view objects"""
_shared_state = {}
_observer_map = {}
_mediator_map = {}
def __init__(self):
self.__dict__ = self._shared_state
def register_observer(self, name, observer):
if not name in self._observer_map:
self._observer_map[name] = []
observers = self._observer_map[name]
observers.append(observer)
def notify_observers(self, note):
for observer in self._observer_map.get(note.name, []):
observer["func"](note)
def remove_observer(self, name, obj):
observers = self._observer_map[name]
for observer in observers:
if observer["obj"] is obj:
observers.remove(observer)
break
def register_mediator(self, mediator):
self._mediator_map[mediator.name] = mediator
for interest in mediator.interests:
observer = {"func": mediator.handle_note, "obj": mediator}
self.register_observer(interest, observer)
mediator.on_register()
return mediator
def get_mediator(self, name):
mediator = self._mediator_map.get(name, None)
if not mediator:
raise LookupError("No Mediator found for name: %s" % name)
return mediator
def remove_mediator(self, name):
mediator = self.get_mediator(name)
for interest in mediator.interests:
self.remove_observer(interest, mediator)
del self._mediator_map[name]
mediator.on_remove()
class Facade(object):
"""singleton. instantiates the mvc and exposes their api's"""
_shared_state = {}
def __init__(self):
self.__dict__ = self._shared_state
self.controller = Controller()
self.model = Model()
self.view = View()
self.register_command = self.controller.register_command
self.register_proxy = self.model.register_proxy
self.register_mediator = self.view.register_mediator
self.remove_command = self.controller.remove_command
self.remove_proxy = self.model.remove_proxy
self.remove_mediator = self.view.remove_mediator
self.get_proxy = self.model.get_proxy
self.get_mediator = self.view.get_mediator
def send_note(self, name, body=None, uid=None):
self.view.notify_observers(Note(name, body, uid))
def command(facade, note):
"""use this signature for a controller"""
    print(facade, note)
def register_command(name):
"""decorator to register a command with the controller"""
def register(cmd):
Facade().register_command(name, cmd)
return cmd
return register
class Proxy(object):
"""extend me for a model object"""
def __init__(self, name, data=None):
self.name = name
self.data = data
self.facade = Facade()
self.send_note = self.facade.send_note
def on_register(self): pass
def on_remove(self): pass
class Mediator(object):
"""extend me for a view object """
interests = [] # must be defined in subclass, not dynamically inserted
def __init__(self, name, view=None):
self.name = name
self.view = view
self.facade = Facade()
self.send_note = self.facade.send_note
def on_register(self): pass
def on_remove(self): pass
    def handle_note(self, note): pass # called whenever a note is sent whose name is listed in self.interests
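# A minimal usage sketch (illustrative names only, not part of the library):
#
#   @register_command('app/startup')
#   def startup(facade, note):
#       facade.register_proxy(Proxy('config', data={'debug': True}))
#
#   Facade().send_note('app/startup')  # runs the command registered above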
| {
"content_hash": "21dcde9bc1860785ff7a0edcfe7285f9",
"timestamp": "",
"source": "github",
"line_count": 168,
"max_line_length": 109,
"avg_line_length": 30.93452380952381,
"alnum_prop": 0.6159322686165095,
"repo_name": "Yoriz/minivc",
"id": "784073bd54d309fd432a01406fd296008186746e",
"size": "5197",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/minivc/mvc.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "14859"
}
],
"symlink_target": ""
} |
import json
from glob import glob
import os.path as osp
name = 'jlab_mock_consumer'
HERE = osp.abspath(osp.dirname(__file__))
with open(osp.join(HERE, 'package.json')) as fid:
data = json.load(fid)
from setuptools import setup
js_name = data['name']
setup(name=name,
version=data['version'],
py_modules = [name],
data_files = [
(f'share/jupyter/labextensions/{js_name}', glob('static/package.json')),
(f'share/jupyter/labextensions/{js_name}/static', glob('static/static/*'))
])
| {
"content_hash": "8910a719bc6c4850542683e9c4f3ee09",
"timestamp": "",
"source": "github",
"line_count": 21,
"max_line_length": 82,
"avg_line_length": 25.047619047619047,
"alnum_prop": 0.6501901140684411,
"repo_name": "jupyter/jupyterlab",
"id": "05af57ecec5b58ea0dbb2d3337068cef8cbfb656",
"size": "526",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "jupyterlab/tests/mock_packages/interop/consumer/setup.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "7475"
},
{
"name": "CSS",
"bytes": "94068"
},
{
"name": "HTML",
"bytes": "1493"
},
{
"name": "JavaScript",
"bytes": "9240"
},
{
"name": "Makefile",
"bytes": "7654"
},
{
"name": "Python",
"bytes": "74649"
},
{
"name": "Shell",
"bytes": "2344"
},
{
"name": "TypeScript",
"bytes": "1090669"
}
],
"symlink_target": ""
} |
from tests.base import TestCase
from vilya.models.linecomment import CommitLineComment, PullLineComment
from vilya.models.consts import (
LINECOMMENT_TYPE_COMMIT, LINECOMMENT_TYPE_PULL)
TARGET_ID = 123
FROM_SHA = '2a200e45b0e223d13477e'
TO_SHA = '2a200e45b0e223d13477e'
OLD_PATH = 'testfolder/testfile.py'
NEW_PATH = 'testfolder/testfile2.py'
FROM_OID = '2a200e45b0e223d13477e' # TODO: oids
TO_OID = '2a200e45b0e223d13477e'
AUTHOR = 'user1'
CONTENT1 = 'test line comment content'
CONTENT2 = 'another test line comment content'
CONTENT_ZH = u'你好,再见'
class LineCommentTest(TestCase):
def test_add_comment(self):
# commit
c1 = CommitLineComment.add(TARGET_ID, FROM_SHA, TO_SHA,
OLD_PATH, NEW_PATH, FROM_OID, TO_OID,
20, 30, AUTHOR, CONTENT1)
assert c1.target_id == TARGET_ID
assert c1.target_type == LINECOMMENT_TYPE_COMMIT
assert c1.from_sha == FROM_SHA
assert c1.to_sha == TO_SHA
assert c1.old_path == OLD_PATH
assert c1.new_path == NEW_PATH
assert c1.from_oid == FROM_OID
assert c1.to_oid == TO_OID
assert c1.old_linenum == 20
assert c1.new_linenum == 30
assert c1.linenum == (20, 30)
assert c1.author == AUTHOR
assert c1.content == CONTENT1
assert c1.position is None
assert c1.paths
# pull
c2 = PullLineComment.add(TARGET_ID, FROM_SHA, TO_SHA,
OLD_PATH, NEW_PATH, FROM_OID, TO_OID,
20, 30, AUTHOR, CONTENT2)
assert c2.target_id == TARGET_ID
assert c2.target_type == LINECOMMENT_TYPE_PULL
assert c2.from_sha == FROM_SHA
assert c2.to_sha == TO_SHA
assert c2.old_path == OLD_PATH
assert c2.new_path == NEW_PATH
assert c2.from_oid == FROM_OID
assert c2.to_oid == TO_OID
assert c2.old_linenum == 20
assert c2.new_linenum == 30
assert c2.linenum == (20, 30)
assert c2.author == AUTHOR
assert c2.content == CONTENT2
assert c2.position is None
assert c2.paths
def test_update_comment(self):
# commit
c1 = CommitLineComment.add(TARGET_ID, FROM_SHA, TO_SHA,
OLD_PATH, NEW_PATH, FROM_OID, TO_OID,
20, 30, AUTHOR, CONTENT1)
assert c1.content == CONTENT1
c1.update(CONTENT2)
c1 = CommitLineComment.get(c1.id)
assert c1.content == CONTENT2
# pull
c2 = PullLineComment.add(TARGET_ID, FROM_SHA, TO_SHA,
OLD_PATH, NEW_PATH, FROM_OID, TO_OID,
20, 30, AUTHOR, CONTENT2)
assert c2.content == CONTENT2
c2.update(CONTENT_ZH)
c2 = CommitLineComment.get(c2.id)
assert c2.content == CONTENT_ZH
def test_delete_comment(self):
# commit
self.clear_comments(CommitLineComment, TARGET_ID, FROM_SHA)
c1 = CommitLineComment.add(TARGET_ID, FROM_SHA, TO_SHA,
OLD_PATH, NEW_PATH, FROM_OID, TO_OID,
20, 30, AUTHOR, CONTENT1)
cs = CommitLineComment.gets_by_target_and_ref(TARGET_ID, FROM_SHA)
assert len(cs) == 1
c1.delete()
cs = CommitLineComment.gets_by_target_and_ref(TARGET_ID, FROM_SHA)
assert len(cs) == 0
# pull
self.clear_comments(PullLineComment, TARGET_ID, FROM_SHA)
c2 = PullLineComment.add(TARGET_ID, FROM_SHA, TO_SHA,
OLD_PATH, NEW_PATH, FROM_OID, TO_OID,
20, 30, AUTHOR, CONTENT1)
cs = PullLineComment.gets_by_target_and_ref(TARGET_ID, FROM_SHA)
assert len(cs) == 1
c2.delete()
cs = PullLineComment.gets_by_target_and_ref(TARGET_ID, FROM_SHA)
assert len(cs) == 0
def test_gets_by_target_and_ref(self):
# commit
self.clear_comments(CommitLineComment, TARGET_ID, FROM_SHA)
c1 = CommitLineComment.add(TARGET_ID, FROM_SHA, TO_SHA,
OLD_PATH, NEW_PATH, FROM_OID, TO_OID,
20, 30, AUTHOR, CONTENT1)
c2 = CommitLineComment.add(TARGET_ID, FROM_SHA, TO_SHA,
OLD_PATH, NEW_PATH, FROM_OID, TO_OID,
20, 30, AUTHOR, CONTENT1)
c3 = CommitLineComment.add(TARGET_ID, FROM_SHA, TO_SHA,
OLD_PATH, NEW_PATH, FROM_OID, TO_OID,
20, 30, AUTHOR, CONTENT1)
cs = CommitLineComment.gets_by_target_and_ref(TARGET_ID, FROM_SHA)
assert len(cs) == 3
self.clear_comments(PullLineComment, TARGET_ID, FROM_SHA)
# pull
PullLineComment.add(TARGET_ID, FROM_SHA, TO_SHA,
OLD_PATH, NEW_PATH, FROM_OID, TO_OID,
20, 30, AUTHOR, CONTENT1)
PullLineComment.add(TARGET_ID, FROM_SHA, TO_SHA,
OLD_PATH, NEW_PATH, FROM_OID, TO_OID,
20, 30, AUTHOR, CONTENT1)
cs = PullLineComment.gets_by_target_and_ref(TARGET_ID, FROM_SHA)
assert len(cs) == 2
def clear_comments(self, classObj, TARGET_ID, FROM_SHA):
cs = classObj.gets_by_target_and_ref(TARGET_ID, FROM_SHA)
classObj.delete_multi(cs)
| {
"content_hash": "41e3cdafc84235c16d3bf062d79ca21d",
"timestamp": "",
"source": "github",
"line_count": 133,
"max_line_length": 74,
"avg_line_length": 41.338345864661655,
"alnum_prop": 0.5542015278283012,
"repo_name": "xtao/code",
"id": "73811de49e54aabe4133449cd3ad49354bec2aef",
"size": "5531",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "tests/test_linecomment.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "7956218"
},
{
"name": "HTML",
"bytes": "548630"
},
{
"name": "JavaScript",
"bytes": "7771620"
},
{
"name": "Makefile",
"bytes": "568"
},
{
"name": "Mako",
"bytes": "11668"
},
{
"name": "Python",
"bytes": "1486693"
},
{
"name": "Shell",
"bytes": "61416"
}
],
"symlink_target": ""
} |
from django.conf.urls import patterns, url
urlpatterns = patterns('',
url(r'^connect/$', 'django_loginradius.views.connect', name='lr_connect'),
)
| {
"content_hash": "42d7a04bef8d47a8980ce553d842e8e3",
"timestamp": "",
"source": "github",
"line_count": 5,
"max_line_length": 78,
"avg_line_length": 30.4,
"alnum_prop": 0.7039473684210527,
"repo_name": "LoginRadius/social-login-django",
"id": "0bd6bf8302f32ac592dec4bc51a28f018688b3a5",
"size": "152",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "django_loginradius/urls.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "20338"
}
],
"symlink_target": ""
} |
import itk
import itk.support.types as itkt
from sys import argv, version_info
import warnings
if version_info >= (3, 8):
from typing import Sequence, TypeVar, get_type_hints, get_args, get_origin, Union
else:
from typing import Sequence, TypeVar, Union
try:
from numpy.typing import ArrayLike
except ImportError:
from numpy import ndarray as ArrayLike
input_filename = argv[1]
output_filename = argv[2]
radius = int(argv[3])
reader = itk.ImageFileReader.IUC2.New(FileName=input_filename)
# test the deduction of the template parameter from the input
filt = itk.MedianImageFilter.New(reader, Radius=radius)
filt.Update()
filt_result = filt.GetOutput()
watcher = itk.XMLFilterWatcher(filt, "filter")
# test the update of the filter with the snake case function
# and the setting of parameter inside it
result_snake_case = itk.median_image_filter(reader, radius=radius)
# SetPrimaryInputName("ValidInput");
compare_filter = itk.ComparisonImageFilter.New(filt_result, TestInput=result_snake_case)
compare_filter.Update()
assert compare_filter.GetMaximumDifference() < 0.000000001
if version_info >= (3, 8):
# Check the type hints
type_hints = get_type_hints(itk.median_image_filter, globalns={"itk": itk})
assert "args" in type_hints
args_hints = type_hints["args"]
assert get_origin(args_hints) is Union
assert itk.ImageBase in get_args(args_hints)
assert "radius" in type_hints
radius_hints = type_hints["radius"]
assert get_origin(radius_hints) is Union
assert int in get_args(radius_hints)
assert Sequence[int] in get_args(radius_hints)
assert "return" in type_hints
result_hints = type_hints["return"]
assert itk.ImageBase in get_args(args_hints)
# Check for process_object attribute pointing to the associated class
assert itk.median_image_filter.process_object is itk.MedianImageFilter
# Test that `__call__()` inside itkTemplate is deprecated. Replaced
# by snake_case functions
with warnings.catch_warnings(record=True) as w:
# Cause all warnings to always be triggered.
warnings.simplefilter("always")
# Trigger a warning.
result = itk.MedianImageFilter(reader, radius=radius)
# Verify some things
assert len(w) == 1
print(w[-1])
assert issubclass(w[-1].category, DeprecationWarning)
assert "deprecated" in str(w[-1].message)
compare_filter = itk.ComparisonImageFilter.New(filt_result, TestInput=result)
compare_filter.Update()
assert compare_filter.GetMaximumDifference() < 0.000000001
# Test that `__call__()` on object is deprecated. Replaced
# by snake_case functions
with warnings.catch_warnings(record=True) as w:
# Cause all warnings to always be triggered.
warnings.simplefilter("always")
# Trigger a warning.
median_filter = itk.MedianImageFilter.New(reader)
result = median_filter(reader, radius=radius)
# Verify some things
assert len(w) == 1
assert issubclass(w[-1].category, DeprecationWarning)
assert "deprecated" in str(w[-1].message)
compare_filter = itk.ComparisonImageFilter.New(filt_result, TestInput=result)
compare_filter.Update()
assert compare_filter.GetMaximumDifference() < 0.000000001
# test the write method
itk.imwrite(filt_result, output_filename)
| {
"content_hash": "5132f319f4a754c2230ad07cab33b714",
"timestamp": "",
"source": "github",
"line_count": 96,
"max_line_length": 88,
"avg_line_length": 34.135416666666664,
"alnum_prop": 0.7339029600244126,
"repo_name": "Kitware/ITK",
"id": "267a880038fcc20cb6f5a669cb454eddf9334fa9",
"size": "4085",
"binary": false,
"copies": "6",
"ref": "refs/heads/master",
"path": "Modules/Filtering/Smoothing/wrapping/test/MedianImageFilterTest.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "460926"
},
{
"name": "C++",
"bytes": "35378599"
},
{
"name": "CMake",
"bytes": "1661462"
},
{
"name": "CSS",
"bytes": "17428"
},
{
"name": "HTML",
"bytes": "8373"
},
{
"name": "JavaScript",
"bytes": "1522"
},
{
"name": "Objective-C++",
"bytes": "5647"
},
{
"name": "Perl",
"bytes": "6197"
},
{
"name": "Python",
"bytes": "682573"
},
{
"name": "SWIG",
"bytes": "53991"
},
{
"name": "Shell",
"bytes": "194950"
},
{
"name": "Tcl",
"bytes": "7953"
},
{
"name": "XSLT",
"bytes": "8640"
}
],
"symlink_target": ""
} |
import chainer
from chainer import backend
class GradientLARS(object):
"""Optimizer/UpdateRule hook function for layer wise adaptive rate scaling.
See: `Large Batch Training of Convolutional Networks
<https://arxiv.org/abs/1708.03888>`_.
See: `Convergence Analysis of Gradient Descent Algorithms
with Proportional Updates
<https://arxiv.org/abs/1801.03137>`_.
This hook function scales all gradient arrays to fit to the weight norm.
In <https://arxiv.org/abs/1708.03888>,
.. math::
v_{t+1} &= m * v_t + \\gamma * \\lambda *
(\\nabla L(w_t) + \\beta w_t), \\\\
w_{t+1} &= w_{t} - v_{t+1},
where
- :math:`\\gamma` : learning_rate
- :math:`m` : momentum
- :math:`\\beta` : weight_decay
    - :math:`\\eta` : lars_coefficient
- :math:`\\lambda`: local_lr \
:math:`=\\eta * \
\\frac{\\|w_t\\|}{\\|\\nabla L(w_t)\\| + \\beta * \\|w_t\\|}`.
As :math:`lr` in chainer.optimizers.SGD or chainer.optimizers.MomentumSGD
corresponds to :math:`\\gamma * \\eta`, we define :math:`clip\\_rate` as
:math:`\\frac{\\|w_t\\|}{\\|\\nabla L(w_t)\\| + \\beta * \\|w_t\\|}`
and reformulate the aforementioned formula as:
:math:`v_{t+1} \
= m * v_t + lr * clip\\_rate * (\\nabla L(w_t) + \\beta w_t)`
    and implement it in this way. So you do not set lars_coefficient.
Args:
        threshold (float): If weight norm is more than threshold,
this function scales all gradient arrays to fit weight norm.
(See <https://arxiv.org/abs/1801.03137>)
weight_decay (float): Coefficient for the weight decay.
eps (float): Small value for the numerical stability.
(See <https://arxiv.org/abs/1801.03137>)
Attributes:
        ~optimizer_hooks.GradientLARS.threshold (float): If weight norm is
more than threshold, this function scales all
gradient arrays to fit weight norm.
(See <https://arxiv.org/abs/1801.03137>)
~optimizer_hooks.GradientLARS.weight_decay (float): Coefficient
for the weight decay.
~optimizer_hooks.GradientLARS.eps (float): Small value for the
numerical stability.
(See <https://arxiv.org/abs/1801.03137>)
~optimizer_hooks.GradientLARS.timing (string): Specifies
when this hook should be called by the
Optimizer/UpdateRule. Valid values are 'pre'
(before any updates) and 'post' (after any updates).
~optimizer_hooks.GradientLARS.call_for_each_param (bool): Specifies
if this hook is called for each parameter (``True``)
or only once (``False``) by an optimizer to
which this hook is registered. This function does
not expect users to switch the value from default one,
which is `True`.
"""
name = 'GradientLARS'
call_for_each_param = True
timing = 'pre'
def __init__(self, threshold=1e-2, weight_decay=0.0, eps=1e-9):
self.threshold = threshold
self.weight_decay = weight_decay
self.eps = eps
def __call__(self, rule, param):
p, g = param.data, param.grad
if p is None or g is None:
return
with chainer.using_device(param.device):
xp = param.device.xp
if xp is backend.chainerx:
# TODO(ecastill): norm in chainerx
p_norm = xp.sqrt(xp.sum(p*p))
g_norm = xp.sqrt(xp.sum(g*g))
else:
# weight norm
p_norm = xp.linalg.norm(p)
# grad norm
g_norm = xp.linalg.norm(g)
local_rate = (p_norm
/ (self.eps + g_norm + self.weight_decay * p_norm))
rate = xp.where(p_norm > self.threshold, local_rate, 1.0)
            if xp is backend.cuda.cupy:
kernel = backend.cuda.elementwise(
'T p, T rate, T weight_decay',
'T g',
'g += weight_decay * p; g *= rate;',
'lars')
kernel(p, rate, self.weight_decay, g)
else:
g += self.weight_decay * p
g *= rate
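# A minimal usage sketch (illustrative only, not part of this module):
#
#   optimizer = chainer.optimizers.MomentumSGD(lr=0.1, momentum=0.9)
#   optimizer.setup(model)  # `model` is any chainer.Link
#   optimizer.add_hook(GradientLARS(threshold=1e-2, weight_decay=1e-4))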
| {
"content_hash": "0ace8673566c9fef0e614d5b15267e14",
"timestamp": "",
"source": "github",
"line_count": 111,
"max_line_length": 79,
"avg_line_length": 40.189189189189186,
"alnum_prop": 0.528805200627662,
"repo_name": "hvy/chainer",
"id": "4ed7a8826dd214669d71c1adb4e52329dcc7fd30",
"size": "4461",
"binary": false,
"copies": "6",
"ref": "refs/heads/master",
"path": "chainer/optimizer_hooks/gradient_lars.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "3796"
},
{
"name": "C",
"bytes": "1099"
},
{
"name": "C++",
"bytes": "1688016"
},
{
"name": "CMake",
"bytes": "51351"
},
{
"name": "Cuda",
"bytes": "191633"
},
{
"name": "Dockerfile",
"bytes": "6423"
},
{
"name": "PowerShell",
"bytes": "7197"
},
{
"name": "Python",
"bytes": "6425409"
},
{
"name": "Shell",
"bytes": "50581"
}
],
"symlink_target": ""
} |
from load import main as main_load
from scrape import main as main_scrape
from transform import main as main_transform
def entrypoints():
return {
'scrape': main_scrape,
'transform': main_transform,
'load': main_load
}
| {
"content_hash": "05510a2a615b79d46ac1edc27dadb7aa",
"timestamp": "",
"source": "github",
"line_count": 10,
"max_line_length": 44,
"avg_line_length": 25.2,
"alnum_prop": 0.6666666666666666,
"repo_name": "nhsengland/publish-o-matic",
"id": "acae94580f02d5f492e1c21857e2fa60d359ef10",
"size": "252",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "datasets/mhmds/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "399397"
}
],
"symlink_target": ""
} |
CATKIN_PACKAGE_PREFIX = ""
PROJECT_PKG_CONFIG_INCLUDE_DIRS = "".split(';') if "" != "" else []
PROJECT_CATKIN_DEPENDS = "".replace(';', ' ')
PKG_CONFIG_LIBRARIES_WITH_PREFIX = "".split(';') if "" != "" else []
PROJECT_NAME = "my_pcl_tutorial"
PROJECT_SPACE_DIR = "/home/xuzhe/study/recogition/pourwater/install"
PROJECT_VERSION = "0.0.0"
| {
"content_hash": "ba0735ff56cbca2e5a4eaa72248fd31e",
"timestamp": "",
"source": "github",
"line_count": 7,
"max_line_length": 68,
"avg_line_length": 48.285714285714285,
"alnum_prop": 0.6479289940828402,
"repo_name": "YinYangOfDao/ComputerVision",
"id": "f16ed0e8a1065b7451a6ed20da8650d45c31d311",
"size": "395",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pourwater/build/my_pcl_tutorial/catkin_generated/pkg.installspace.context.pc.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C++",
"bytes": "15286"
},
{
"name": "CMake",
"bytes": "19355"
},
{
"name": "Python",
"bytes": "36812"
},
{
"name": "Shell",
"bytes": "9033"
}
],
"symlink_target": ""
} |
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('landing', '0078_auto_20200215_1909'),
]
operations = [
migrations.AlterField(
model_name='section',
name='template',
field=models.CharField(choices=[('standard_section.html', 'standard section'), ('cards_section.html', 'cards section')], max_length=50, verbose_name='standard template'),
),
]
| {
"content_hash": "e52d758c3a07d02ef7fde58e5df22ee0",
"timestamp": "",
"source": "github",
"line_count": 16,
"max_line_length": 182,
"avg_line_length": 29.5,
"alnum_prop": 0.6165254237288136,
"repo_name": "flavoi/diventi",
"id": "d85a68046299dd8b76f851e1157494274efd585e",
"size": "522",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "diventi/landing/migrations/0079_auto_20200215_2305.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "385265"
},
{
"name": "Procfile",
"bytes": "46"
},
{
"name": "Python",
"bytes": "826530"
}
],
"symlink_target": ""
} |
from django.core.urlresolvers import reverse
from django.shortcuts import redirect, get_object_or_404
from django.contrib.auth import authenticate, login, REDIRECT_FIELD_NAME
from django.contrib.auth.forms import PasswordChangeForm
from django.contrib.auth.views import logout as Signout
from django.views.generic import TemplateView
from django.views.generic.list import ListView
from django.views.generic import FormView, View
from django.views.generic.list import MultipleObjectMixin
from django.contrib import messages
from django.utils.translation import ugettext as _
from django.http import HttpResponseForbidden, Http404
from .forms import (SignupForm, AuthenticationForm,
ChangeEmailForm, EditProfileForm)
from .models import AccountsSignup
from .decorators import secure_required
from .utils import signin_redirect, get_profile_model, get_user_model
from .import signals as accounts_signals
from .import settings as accounts_settings
class ExtraContextTemplateView(TemplateView):
""" Add extra context to a simple template view """
extra_context = None
def get_context_data(self, *args, **kwargs):
context = super(ExtraContextTemplateView, self).get_context_data(*args,
**kwargs)
if self.extra_context:
context.update(self.extra_context)
return context
# this view is used in POST requests,
# e.g. signup when the form is not valid
post = TemplateView.get
@secure_required
def activate(request, activation_key,
template_name='accounts/activate_fail.html',
success_url=None, extra_context=None):
"""
Activate a user with an activation key.
The key is a SHA1 string. When the SHA1 is found with an
:class:`AccountsSignup`, the :class:`User` of that account will be
activated. After a successful activation the view will redirect to
``success_url``. If the SHA1 is not found, the user will be shown the
``template_name`` template displaying a fail message.
:param activation_key:
        A SHA1 hex string of 40 characters. A SHA1 digest is always 160 bits
        long; at 4 bits per hex character that makes it 160/4 = 40 characters
        long.
:param template_name:
String containing the template name that is used when the
``activation_key`` is invalid and the activation fails. Defaults to
        ``accounts/activate_fail.html``.
:param success_url:
String containing the URL where the user should be redirected to after
a successful activation. Will replace ``%(username)s`` with string
formatting if supplied. If ``success_url`` is left empty, will direct
to ``accounts_profile_detail`` view.
:param extra_context:
Dictionary containing variables which could be added to the template
context. Default to an empty dictionary.
"""
user = AccountsSignup.objects.activate_user(activation_key)
if user:
# Sign the user in.
auth_user = authenticate(identification=user.email,
check_password=False)
login(request, auth_user)
if accounts_settings.ACCOUNTS_USE_MESSAGES:
messages.success(request,
_('Your account has been activated and you have been signed in.'),
fail_silently=True)
if success_url:
redirect_to = success_url % {'username': user.username}
else:
redirect_to = reverse('accounts_profile_detail',
kwargs={'username': user.username})
return redirect(redirect_to)
else:
if not extra_context:
extra_context = dict()
return ExtraContextTemplateView.as_view(template_name=template_name,
extra_context=extra_context)(request)
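# Illustrative URL wiring for this view (assumed pattern and name; the
# project's actual urls.py may differ):
#
#   url(r'^activate/(?P<activation_key>\w+)/$', activate,
#       name='accounts_activate'),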
@secure_required
def email_confirm(request, confirmation_key,
template_name='accounts/email_confirm_fail.html',
success_url=None, extra_context=None):
"""
Confirms an email address with a confirmation key.
    Confirms a new email address by running the
    :func:`AccountsSignup.objects.confirm_email` method. If the method returns
    a :class:`User`, the user will have the new e-mail address set and be
    redirected to ``success_url``. If no ``User`` is returned, the user will be
    presented with a fail message from ``template_name``.
:param confirmation_key:
String with a SHA1 representing the confirmation key used to verify a
new email address.
:param template_name:
String containing the template name which should be rendered when
confirmation fails. When confirmation is successful, no template is
needed because the user will be redirected to ``success_url``.
:param success_url:
String containing the URL which is redirected to after a successful
confirmation. Supplied argument must be able to be rendered by
``reverse`` function.
:param extra_context:
Dictionary of variables that are passed on to the template supplied by
``template_name``.
"""
user = AccountsSignup.objects.confirm_email(confirmation_key)
if user:
if accounts_settings.ACCOUNTS_USE_MESSAGES:
messages.success(request,
_('Your email address has been changed.'),
fail_silently=True)
if success_url:
redirect_to = success_url
else:
redirect_to = reverse('accounts_email_confirm_complete',
kwargs={'username': user.username})
return redirect(redirect_to)
else:
if not extra_context:
extra_context = dict()
return ExtraContextTemplateView.as_view(template_name=template_name,
extra_context=extra_context)(request)
def direct_to_user_template(request, username, template_name,
extra_context=None):
"""
Simple wrapper for Django's :func:`direct_to_template` view.
This view is used when you want to show a template to a specific user. A
wrapper for :func:`direct_to_template` where the template also has access
to the user that is found with ``username``. For ex. used after signup,
activation and confirmation of a new e-mail.
:param username:
String defining the username of the user that made the action.
:param template_name:
String defining the name of the template to use. Defaults to
``accounts/signup_complete.html``.
**Keyword arguments**
``extra_context``
A dictionary containing extra variables that should be passed to the
rendered template. The ``account`` key is always the ``User``
that completed the action.
**Extra context**
``viewed_user``
The currently :class:`User` that is viewed.
"""
user = get_object_or_404(get_user_model(), username__iexact=username)
if not extra_context:
extra_context = dict()
extra_context['viewed_user'] = user
extra_context['profile'] = user.get_profile()
return ExtraContextTemplateView.as_view(template_name=template_name,
extra_context=extra_context)(request)
@secure_required
def signin(request, auth_form=AuthenticationForm,
template_name='accounts/signin_form.html',
redirect_field_name=REDIRECT_FIELD_NAME,
redirect_signin_function=signin_redirect, extra_context=None):
"""
Signin using email or username with password.
Signs a user in by combining email/username with password. If the
combination is correct and the user :func:`is_active` the
:func:`redirect_signin_function` is called with the arguments
    ``REDIRECT_FIELD_NAME`` and an instance of the :class:`User` who is
    trying to log in. The returned value of the function will be the URL that
is redirected to.
    A user can also select to be remembered for ``ACCOUNTS_REMEMBER_ME_DAYS`` days.
:param auth_form:
Form to use for signing the user in. Defaults to the
:class:`AuthenticationForm` supplied by accounts.
:param template_name:
String defining the name of the template to use. Defaults to
``accounts/signin_form.html``.
:param redirect_field_name:
Form field name which contains the value for a redirect to the
succeeding page. Defaults to ``next`` and is set in
``REDIRECT_FIELD_NAME`` setting.
:param redirect_signin_function:
        Function which handles the redirect. This function gets the value of
``REDIRECT_FIELD_NAME`` and the :class:`User` who has logged in. It
must return a string which specifies the URI to redirect to.
:param extra_context:
A dictionary containing extra variables that should be passed to the
rendered template. The ``form`` key is always the ``auth_form``.
**Context**
``form``
Form used for authentication supplied by ``auth_form``.
"""
form = auth_form()
if request.method == 'POST':
form = auth_form(request.POST, request.FILES)
if form.is_valid():
identification = form.cleaned_data['identification']
password = form.cleaned_data['password']
remember_me = form.cleaned_data['remember_me']
user = authenticate(identification=identification,
password=password)
if user.is_active:
login(request, user)
if remember_me:
request.session.set_expiry(accounts_settings.ACCOUNTS_REMEMBER_ME_DAYS[1] * 86400)
else:
request.session.set_expiry(0)
if accounts_settings.ACCOUNTS_USE_MESSAGES:
messages.success(request, _('You have been signed in.'),
fail_silently=True)
# Whereto now?
redirect_to = redirect_signin_function(
request.GET.get(redirect_field_name), user)
return redirect(redirect_to)
else:
return redirect(reverse('accounts_disabled',
kwargs={'username': user.username}))
if not extra_context:
extra_context = dict()
extra_context.update({
'form': form,
'next': request.GET.get(redirect_field_name),
})
return ExtraContextTemplateView.as_view(template_name=template_name,
extra_context=extra_context)(request)
@secure_required
def signout(request, next_page=accounts_settings.ACCOUNTS_REDIRECT_ON_SIGNOUT,
template_name='accounts/signout.html', *args, **kwargs):
"""
Signs out the user and adds a success message ``You have been signed
    out.`` If ``next_page`` is defined, you will be redirected to that URI. If
    not, the template in ``template_name`` is used.
:param next_page:
A string which specifies the URI to redirect to.
:param template_name:
String defining the name of the template to use. Defaults to
``accounts/signout.html``.
"""
if request.user.is_authenticated() and \
accounts_settings.ACCOUNTS_USE_MESSAGES: # pragma: no cover
messages.success(request, _('You have been signed out.'),
fail_silently=True)
return Signout(request, next_page, template_name, *args, **kwargs)
@secure_required
def email_change(request, username, email_form=ChangeEmailForm,
template_name='accounts/email_form.html', success_url=None,
extra_context=None):
"""
Change email address
:param username:
String of the username which specifies the current account.
:param email_form:
Form that will be used to change the email address. Defaults to
:class:`ChangeEmailForm` supplied by accounts.
:param template_name:
String containing the template to be used to display the email form.
Defaults to ``accounts/email_form.html``.
:param success_url:
Named URL where the user will get redirected to when successfully
        changing their email address. When not supplied, will redirect to the
        ``accounts_email_change_complete`` URL.
:param extra_context:
Dictionary containing extra variables that can be used to render the
template. The ``form`` key is always the form supplied by the keyword
argument ``form`` and the ``user`` key by the user whose email address
is being changed.
**Context**
``form``
Form that is used to change the email address supplied by ``form``.
``account``
Instance of the ``Account`` whose email address is about to be changed.
**Todo**
Need to have per-object permissions, which enables users with the correct
permissions to alter the email address of others.
"""
user = get_object_or_404(get_user_model(), username__iexact=username)
form = email_form(user)
if request.method == 'POST':
form = email_form(user,
request.POST,
request.FILES)
if form.is_valid():
form.save()
if success_url:
redirect_to = success_url
else:
redirect_to = reverse('accounts_email_change_complete',
kwargs={'username': user.username})
return redirect(redirect_to)
if not extra_context:
extra_context = dict()
extra_context['form'] = form
extra_context['profile'] = user.get_profile()
return ExtraContextTemplateView.as_view(template_name=template_name,
extra_context=extra_context)(request)
@secure_required
def password_change(request, username,
template_name='accounts/password_form.html',
pass_form=PasswordChangeForm, success_url=None,
extra_context=None):
""" Change password of user.
This view is almost a mirror of the view supplied in
:func:`contrib.auth.views.password_change`, with the minor change that in
this view we also use the username to change the password. This was needed
to keep our URLs logical (and REST) across the entire application. And
that in a later stadium administrators can also change the users password
through the web application itself.
:param username:
        String supplying the username of the user whose password is about to be
changed.
:param template_name:
String of the name of the template that is used to display the password
change form. Defaults to ``accounts/password_form.html``.
:param pass_form:
Form used to change password. Default is the form supplied by Django
itself named ``PasswordChangeForm``.
:param success_url:
Named URL that is passed onto a :func:`reverse` function with
``username`` of the active user. Defaults to the
        ``accounts_password_change_complete`` URL.
:param extra_context:
Dictionary of extra variables that are passed on to the template. The
``form`` key is always used by the form supplied by ``pass_form``.
**Context**
``form``
Form used to change the password.
"""
user = get_object_or_404(get_user_model(),
username__iexact=username)
form = pass_form(user=user)
if request.method == "POST":
form = pass_form(user=user, data=request.POST)
if form.is_valid():
form.save()
# Send a signal that the password has changed
accounts_signals.password_complete.send(sender=None,
user=user)
if success_url:
redirect_to = success_url
else:
redirect_to = reverse('accounts_password_change_complete',
kwargs={'username': user.username})
return redirect(redirect_to)
if not extra_context:
extra_context = dict()
extra_context['form'] = form
extra_context['profile'] = user.get_profile()
return ExtraContextTemplateView.as_view(template_name=template_name,
extra_context=extra_context)(request)
@secure_required
def profile_edit(request, username, edit_profile_form=EditProfileForm,
template_name='accounts/profile_form.html', success_url=None,
extra_context=None, **kwargs):
"""
Edit profile.
    Edits a profile selected by the supplied username. First checks whether
    the user is allowed to edit this profile; if denied, a 404 is shown. When
    the profile is successfully edited, the view redirects to ``success_url``.
:param username:
Username of the user which profile should be edited.
:param edit_profile_form:
Form that is used to edit the profile. The :func:`EditProfileForm.save`
method of this form will be called when the form
:func:`EditProfileForm.is_valid`. Defaults to :class:`EditProfileForm`
from accounts.
:param template_name:
String of the template that is used to render this view. Defaults to
        ``accounts/profile_form.html``.
:param success_url:
Named URL which will be passed on to a django ``reverse`` function
after the form is successfully saved. Defaults to the
        ``accounts_profile_detail`` URL.
:param extra_context:
Dictionary containing variables that are passed on to the
``template_name`` template. ``form`` key will always be the form used
to edit the profile, and the ``profile`` key is always the edited
profile.
**Context**
``form``
Form that is used to alter the profile.
``profile``
Instance of the ``Profile`` that is edited.
"""
user = get_object_or_404(get_user_model(),
username__iexact=username)
profile = user.get_profile()
user_initial = {'first_name': user.first_name,
'last_name': user.last_name}
form = edit_profile_form(instance=profile, initial=user_initial)
if request.method == 'POST':
form = edit_profile_form(request.POST, request.FILES, instance=profile,
initial=user_initial)
if form.is_valid():
profile = form.save()
if accounts_settings.ACCOUNTS_USE_MESSAGES:
messages.success(request, _('Your profile has been updated.'),
fail_silently=True)
if success_url:
redirect_to = success_url
else:
redirect_to = reverse('accounts_profile_detail',
kwargs={'username': username})
return redirect(redirect_to)
if not extra_context:
extra_context = dict()
extra_context['form'] = form
extra_context['profile'] = profile
return ExtraContextTemplateView.as_view(template_name=template_name,
extra_context=extra_context)(request)
def profile_detail(
request, username,
template_name=accounts_settings.ACCOUNTS_PROFILE_DETAIL_TEMPLATE,
extra_context=None, **kwargs):
"""
Detailed view of an user.
:param username:
String of the username of which the profile should be viewed.
:param template_name:
String representing the template name that should be used to display
the profile.
:param extra_context:
Dictionary of variables which should be supplied to the template. The
``profile`` key is always the current profile.
**Context**
``profile``
Instance of the currently viewed ``Profile``.
"""
user = get_object_or_404(get_user_model(),
username__iexact=username)
profile_model = get_profile_model()
try:
profile = user.get_profile()
except profile_model.DoesNotExist:
profile = profile_model(user=user)
profile.save()
if not profile.can_view_profile(request.user):
return HttpResponseForbidden(_("You don't have permission to view this profile."))
if not extra_context:
extra_context = dict()
extra_context['profile'] = user.get_profile()
return ExtraContextTemplateView.as_view(template_name=template_name,
extra_context=extra_context)(request)
def account_delete(request, username,
template_name=accounts_settings.ACCOUNTS_PROFILE_DETAIL_TEMPLATE,
extra_context=None, **kwargs):
"""
Delete an account.
"""
user = get_object_or_404(get_user_model(),
username__iexact=username)
user.is_active = False
user.save()
return redirect(reverse('accounts_admin'))
class ProfileListView(ListView):
"""
Lists all profiles
"""
context_object_name = 'profile_list'
page = 1
paginate_by = 20
template_name = 'accounts/profile_list.html'
extra_context = None
def get_context_data(self, **kwargs):
# Call the base implementation first to get a context
context = super(ProfileListView, self).get_context_data(**kwargs)
try:
page = int(self.request.GET.get('page', None))
except (TypeError, ValueError):
page = self.page
if not self.request.user.is_staff:
raise Http404
if not self.extra_context:
self.extra_context = dict()
context['page'] = page
context['paginate_by'] = self.paginate_by
context['extra_context'] = self.extra_context
context['form'] = SignupForm()
return context
def get_queryset(self):
profile_model = get_profile_model()
queryset = profile_model.objects.get_visible_profiles(self.request.user)
return queryset
class AccountsFormView(FormView, MultipleObjectMixin):
template_name = 'accounts/profile_list.html'
form_class = SignupForm
def get_context_data(self, **kwargs):
context = super(AccountsFormView, self).get_context_data(**kwargs)
return context
def get_success_url(self):
return reverse(
'accounts_admin',
kwargs=None
)
def form_valid(self, form):
if not self.request.user.is_authenticated():
return HttpResponseForbidden()
user = form.save()
# Send the signup complete signal
accounts_signals.signup_complete.send(sender=None,
user=user)
# record the interest using the message in form.cleaned_data
return super(AccountsFormView, self).form_valid(form)
class AccountsListView(View):
def get(self, request, *args, **kwargs):
view = ProfileListView.as_view()
return view(request, *args, **kwargs)
def post(self, request, *args, **kwargs):
view = AccountsFormView.as_view()
return view(request, *args, **kwargs)
| {
"content_hash": "53890d71e074b00162a0b84fa62c3183",
"timestamp": "",
"source": "github",
"line_count": 650,
"max_line_length": 102,
"avg_line_length": 36.13230769230769,
"alnum_prop": 0.6268841011666525,
"repo_name": "ff0000/scarlet",
"id": "15cd0a17184f438ea11b94390e7f7db4eeb76447",
"size": "23486",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "scarlet/accounts/views.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "217430"
},
{
"name": "HTML",
"bytes": "43216"
},
{
"name": "JavaScript",
"bytes": "2200686"
},
{
"name": "Python",
"bytes": "508579"
},
{
"name": "Ruby",
"bytes": "485"
},
{
"name": "Shell",
"bytes": "1813"
}
],
"symlink_target": ""
} |
from django.conf import settings
from open511_server.utils.exceptions import BadRequest
class APIPaginator(object):
"""
Largely cribbed from django-tastypie.
"""
def __init__(self, request, objects, limit=None, offset=0, max_limit=500):
"""
Instantiates the ``Paginator`` and allows for some configuration.
The ``objects`` should be a list-like object of ``Resources``.
This is typically a ``QuerySet`` but can be anything that
implements slicing. Required.
Optionally accepts a ``limit`` argument, which specifies how many
items to show at a time. Defaults to ``None``, which is no limit.
Optionally accepts an ``offset`` argument, which specifies where in
the ``objects`` to start displaying results from. Defaults to 0.
"""
self.request_data = request.GET
self.objects = objects
self.limit = limit
self.max_limit = max_limit
self.offset = offset
self.resource_uri = request.path
def get_limit(self):
"""
Determines the proper maximum number of results to return.
In order of importance, it will use:
* The user-requested ``limit`` from the GET parameters, if specified.
* The object-level ``limit`` if specified.
* ``settings.API_LIMIT_PER_PAGE`` if specified.
Default is 50 per page.
"""
settings_limit = getattr(settings, 'API_LIMIT_PER_PAGE', 50)
if 'limit' in self.request_data:
limit = self.request_data['limit']
elif self.limit is not None:
limit = self.limit
else:
limit = settings_limit
try:
limit = int(limit)
except ValueError:
raise BadRequest("Invalid limit '%s' provided. Please provide a positive integer." % limit)
if limit == 0:
if self.limit:
limit = self.limit
else:
limit = settings_limit
if limit < 0:
raise BadRequest("Invalid limit '%s' provided. Please provide a positive integer >= 0." % limit)
if self.max_limit and limit > self.max_limit:
return self.max_limit
return limit
def get_offset(self):
"""
Determines the proper starting offset of results to return.
It attempts to use the user-provided ``offset`` from the GET parameters,
if specified. Otherwise, it falls back to the object-level ``offset``.
Default is 0.
"""
offset = self.offset
if 'offset' in self.request_data:
offset = self.request_data['offset']
try:
offset = int(offset)
except ValueError:
raise BadRequest("Invalid offset '%s' provided. Please provide an integer." % offset)
if offset < 0:
raise BadRequest("Invalid offset '%s' provided. Please provide a positive integer >= 0." % offset)
return offset
def _generate_uri(self, limit, offset):
if self.resource_uri is None:
return None
# QueryDict has a urlencode method that can handle multiple values for the same key
request_params = self.request_data.copy()
if 'limit' in request_params:
del request_params['limit']
if 'offset' in request_params:
del request_params['offset']
request_params.update({'limit': limit, 'offset': max(offset, 0)})
encoded_params = request_params.urlencode()
return '%s?%s' % (
self.resource_uri,
encoded_params
)
def page(self):
"""
Returns a tuple of (objects, page_data), where objects is one page of objects (a list),
and page_data is a dict of pagination info.
"""
limit = self.get_limit()
offset = self.get_offset()
page_data = {
'offset': offset,
# 'limit': limit,
}
# We get one more object than requested, to see if
# there's a next page.
objects = list(self.objects[offset:offset + limit + 1])
if len(objects) > limit:
objects.pop()
page_data['next_url'] = self._generate_uri(limit, offset + limit)
else:
page_data['next_url'] = None
page_data['previous_url'] = (self._generate_uri(limit, offset - limit)
if offset > 0 else None)
return (objects, page_data) | {
"content_hash": "35ea30cf9cd6f65ba82f900f8bf4c7f3",
"timestamp": "",
"source": "github",
"line_count": 137,
"max_line_length": 110,
"avg_line_length": 32.846715328467155,
"alnum_prop": 0.5802222222222222,
"repo_name": "Open511/open511-server",
"id": "57e7a41cebeb0115747634533a76d359874de401",
"size": "4500",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "open511_server/utils/pagination.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "7506"
},
{
"name": "JavaScript",
"bytes": "8822"
},
{
"name": "Python",
"bytes": "96467"
}
],
"symlink_target": ""
} |
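A minimal sketch of how APIPaginator might be used from a Django view; the Event model and the JSON response shape are assumptions for illustration, not part of the Open511 code:
# Illustrative sketch; Event is an assumed model with a sliceable queryset.
from django.http import JsonResponse
from open511_server.utils.pagination import APIPaginator
def event_list(request):
    qs = Event.objects.all()  # hypothetical model; any sliceable sequence works
    paginator = APIPaginator(request, qs, max_limit=200)
    objects, page_data = paginator.page()
    # page_data carries 'offset', 'next_url' and 'previous_url'
    return JsonResponse({
        'events': [str(obj) for obj in objects],
        'pagination': page_data,
    })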
from licant.modules import submodule
from licant.cxx_modules import application
from licant.scripter import scriptq
import licant
scriptq.execute("../../gxx.g.py")
application("target",
sources = ["main.c"],
include_paths = ["../.."],
modules = [
submodule("gxx.c_only"),
submodule("gxx.dprint", "stdout"),
],
)
licant.ex("target")
| {
"content_hash": "726d3f6062099eba9aa548a4c4fa97e9",
"timestamp": "",
"source": "github",
"line_count": 17,
"max_line_length": 42,
"avg_line_length": 20.176470588235293,
"alnum_prop": 0.685131195335277,
"repo_name": "Mirmik/gxx",
"id": "9d3a12b8a4f8577844ea0b3c6d84fa3937759082",
"size": "382",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "tests/HIDE/ds_sline/make.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Assembly",
"bytes": "64"
},
{
"name": "C",
"bytes": "690591"
},
{
"name": "C++",
"bytes": "1010156"
},
{
"name": "Lua",
"bytes": "2409"
},
{
"name": "Objective-C",
"bytes": "4072"
},
{
"name": "Python",
"bytes": "49652"
},
{
"name": "QMake",
"bytes": "955"
}
],
"symlink_target": ""
} |
"""
Hammer projection (http://en.wikipedia.org/wiki/Hammer_projection)
"""
from glumpy import library
from . transform import Transform
class HammerProjection(Transform):
""" Hammer projection """
aliases = { }
def __init__(self, *args, **kwargs):
"""
Initialize the transform.
Note that parameters must be passed by name (param=value).
Kwargs parameters
-----------------
"""
code = library.get("transforms/hammer.glsl")
Transform.__init__(self, code, *args, **kwargs)
def on_attach(self, program):
""" Initialization event """
pass
| {
"content_hash": "79cdaeb44bb8498e21a1b42467d48cdf",
"timestamp": "",
"source": "github",
"line_count": 29,
"max_line_length": 66,
"avg_line_length": 22.06896551724138,
"alnum_prop": 0.5828125,
"repo_name": "glumpy/glumpy",
"id": "15923a5b3215979238c864326ac77a1447fc74a4",
"size": "910",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "glumpy/transforms/hammer.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "26075"
},
{
"name": "Cython",
"bytes": "660"
},
{
"name": "GLSL",
"bytes": "177965"
},
{
"name": "Makefile",
"bytes": "407"
},
{
"name": "Python",
"bytes": "1320773"
}
],
"symlink_target": ""
} |
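The GLSL code loaded from transforms/hammer.glsl is not shown here, but the standard Hammer forward mapping it presumably implements can be sketched in plain NumPy for reference (this is the textbook formula, not glumpy code):
# Reference sketch of the standard Hammer projection (lon/lat in radians).
import numpy as np
def hammer_forward(lon, lat):
    denom = np.sqrt(1.0 + np.cos(lat) * np.cos(lon / 2.0))
    x = 2.0 * np.sqrt(2.0) * np.cos(lat) * np.sin(lon / 2.0) / denom
    y = np.sqrt(2.0) * np.sin(lat) / denom
    return x, y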
"""Module for Client for kripo web service"""
from __future__ import absolute_import
import requests
from rdkit.Chem.AllChem import MolFromMolBlock
from requests import HTTPError
class Incomplete(Exception):
def __init__(self, message, absent_identifiers):
super(Incomplete, self).__init__(message)
self.absent_identifiers = absent_identifiers
class IncompleteFragments(Incomplete):
def __init__(self, absent_identifiers, fragments):
"""List of fragments and list of identifiers for which no information could be found
Args:
absent_identifiers (List[str]): List of identifiers for which no information could be found
fragments (List[dict]): List of fragment information that could be retrieved
"""
message = 'Some identifiers could not be found'
super(IncompleteFragments, self).__init__(message, absent_identifiers)
self.fragments = fragments
class IncompletePharmacophores(Incomplete):
def __init__(self, absent_identifiers, pharmacophores):
"""List of fragments and list of identifiers for which no information could be found
Args:
absent_identifiers (List[str]): List of identifiers for which no information could be found
pharmacophores (List[dict]): List of pharmacophores that could be retrieved
"""
message = 'Some identifiers could not be found'
super(IncompletePharmacophores, self).__init__(message, absent_identifiers)
self.pharmacophores = pharmacophores
class WebserviceClient(object):
"""Client for kripo web service
Example:
>>> client = WebserviceClient('http://localhost:8084/kripo')
>>> client.similar_fragments('3j7u_NDP_frag24', 0.85)
[{'query_frag_id': '3j7u_NDP_frag24', 'hit_frag_id': '3j7u_NDP_frag23', 'score': 0.8991}]
Args:
base_url (str): Base url of web service. e.g. http://localhost:8084/kripo
"""
def __init__(self, base_url):
self.base_url = base_url
def similar_fragments(self, fragment_id, cutoff, limit=1000):
"""Find similar fragments to query.
Args:
fragment_id (str): Query fragment identifier
cutoff (float): Cutoff, similarity scores below cutoff are discarded.
limit (int): Maximum number of hits. Defaults to 1000.
Returns:
list[dict]: Query fragment identifier, hit fragment identifier and similarity score
Raises:
requests.HTTPError: When fragment_id could not be found
"""
url = self.base_url + '/fragments/{fragment_id}/similar'.format(fragment_id=fragment_id)
params = {'cutoff': cutoff, 'limit': limit}
response = requests.get(url, params)
response.raise_for_status()
return response.json()
def fragments_by_pdb_codes(self, pdb_codes, chunk_size=450):
"""Retrieve fragments by their PDB code
Args:
pdb_codes (List[str]): List of PDB codes
chunk_size (int): Number of PDB codes to retrieve in a single http request
Returns:
list[dict]: List of fragment information
Raises:
requests.HTTPError: When one of the PDB codes could not be found.
"""
return self._fetch_chunked_fragments('pdb_codes', pdb_codes, chunk_size)
def fragments_by_id(self, fragment_ids, chunk_size=100):
"""Retrieve fragments by their identifier
Args:
fragment_ids (List[str]): List of fragment identifiers
chunk_size (int): Number of fragment to retrieve in a single http request
Returns:
list[dict]: List of fragment information
Raises:
IncompleteFragments: When one or more of the identifiers could not be found.
"""
return self._fetch_chunked_fragments('fragment_ids', fragment_ids, chunk_size)
def _fetch_chunked_fragments(self, idtype, ids, chunk_size):
fragments = []
absent_identifiers = []
for start in range(0, len(ids), chunk_size):
stop = chunk_size + start
(chunk_fragments, chunk_absent_identifiers) = self._fetch_fragments(idtype, ids[start:stop])
fragments += chunk_fragments
absent_identifiers += chunk_absent_identifiers
if chunk_absent_identifiers:
raise IncompleteFragments(absent_identifiers, fragments)
return fragments
def _fetch_fragments(self, idtype, ids):
url = self.base_url + '/fragments?{idtype}={ids}'.format(idtype=idtype, ids=','.join(ids))
absent_identifiers = []
try:
response = requests.get(url)
response.raise_for_status()
fragments = response.json()
except HTTPError as e:
if e.response.status_code == 404:
body = e.response.json()
fragments = body['fragments']
absent_identifiers = body['absent_identifiers']
else:
raise e
# Convert molblock string to RDKit Mol object
for fragment in fragments:
if fragment['mol'] is not None:
fragment['mol'] = MolFromMolBlock(fragment['mol'])
return fragments, absent_identifiers
def pharmacophores(self, fragment_ids):
absent_identifiers = []
pharmacophores = []
for fragment_id in fragment_ids:
url = self.base_url + '/fragments/{0}.phar'.format(fragment_id)
try:
response = requests.get(url)
response.raise_for_status()
pharmacophore = response.text
pharmacophores.append(pharmacophore)
except HTTPError as e:
if e.response.status_code == 404:
pharmacophores.append(None)
absent_identifiers.append(fragment_id)
else:
raise e
if absent_identifiers:
raise IncompletePharmacophores(absent_identifiers, pharmacophores)
return pharmacophores
| {
"content_hash": "91db6dd65ab5521f19b31e2e07ae0cf5",
"timestamp": "",
"source": "github",
"line_count": 155,
"max_line_length": 104,
"avg_line_length": 39.43225806451613,
"alnum_prop": 0.6223821989528796,
"repo_name": "3D-e-Chem/python-modified-tanimoto",
"id": "d378e47dc249ba85df58d81620a82453a7dc65c8",
"size": "6701",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "kripodb/webservice/client.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "107247"
}
],
"symlink_target": ""
} |
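As the docstrings above explain, fragments_by_id raises IncompleteFragments when some identifiers cannot be resolved while still carrying the fragments that were found. A short sketch of the intended calling pattern (base URL and identifiers are the placeholder values from the class docstring):
# Illustrative sketch of handling a partially successful lookup.
from kripodb.webservice.client import WebserviceClient, IncompleteFragments
client = WebserviceClient('http://localhost:8084/kripo')
try:
    fragments = client.fragments_by_id(['3j7u_NDP_frag24', '3j7u_NDP_frag23'])
except IncompleteFragments as exc:
    fragments = exc.fragments  # whatever could be retrieved
    print('Missing: %s' % ', '.join(exc.absent_identifiers))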
from girder.constants import AccessType
from girder.models.assetstore import Assetstore
from girder.models.collection import Collection
from girder.models.folder import Folder
from girder.models.group import Group
from girder.models.setting import Setting
from girder.settings import SettingKey
from girder_large_image.constants import PluginSettings as LargeImageSettings
from isic_archive import settings
from .models.user import User
def getAdminUser():
# TODO: cache this?
return User().findOne({'login': 'isic-admin'})
def _setGirderSettings():
Setting().set(SettingKey.USER_DEFAULT_FOLDERS, 'none')
Setting().set(SettingKey.CONTACT_EMAIL_ADDRESS, '[email protected]')
Setting().set(SettingKey.BRAND_NAME, 'ISIC Archive')
# TODO: Make email verification required when not in development
Setting().set(SettingKey.EMAIL_VERIFICATION, 'optional')
# TODO: restart after ROUTE_TABLE is set?
Setting().set(SettingKey.ROUTE_TABLE, {
'core_girder': '/girder',
'core_static_root': '/static',
'markup': '/markup',
})
Setting().set(SettingKey.CORS_ALLOW_ORIGIN, ','.join(settings.ISIC_CORS_ORIGINS))
Setting().set(SettingKey.CORS_ALLOW_METHODS, 'HEAD, GET, POST, PUT, DELETE')
Setting().set(SettingKey.EMAIL_HOST, settings.ISIC_GIRDER_SITE_URL)
for isicSettingValue, girderSettingKey in [
(settings.ISIC_SMTP_HOST, SettingKey.SMTP_HOST),
(settings.ISIC_SMTP_PORT, SettingKey.SMTP_PORT),
(settings.ISIC_SMTP_USERNAME, SettingKey.SMTP_USERNAME),
(settings.ISIC_SMTP_PASSWORD, SettingKey.SMTP_PASSWORD),
(settings.ISIC_SMTP_ENCRYPTION, SettingKey.SMTP_ENCRYPTION),
]:
if isicSettingValue is not None:
Setting().set(girderSettingKey, isicSettingValue)
def _setLargeImageSettings():
Setting().set(LargeImageSettings.LARGE_IMAGE_AUTO_SET, False)
Setting().set(LargeImageSettings.LARGE_IMAGE_MAX_SMALL_IMAGE_SIZE, 0)
# TODO: consider saving thumbnail files
Setting().set(LargeImageSettings.LARGE_IMAGE_MAX_THUMBNAIL_FILES, 0)
def _provisionAdminUser():
adminUser = User().findOne({'login': 'isic-admin'})
if not adminUser:
adminUser = User().createUser(
login='isic-admin',
password=None,
firstName='ISIC Archive',
lastName='Admin',
email='[email protected]',
admin=True,
public=False,
)
if settings.ISIC_ADMIN_PASSWORD:
User().setPassword(adminUser, settings.ISIC_ADMIN_PASSWORD, save=False)
adminUser['status'] = 'enabled'
User().save(adminUser)
else:
User().setPassword(adminUser, None, save=False)
adminUser['status'] = 'disabled'
# TODO: subsequent re-saves of this user will re-enable it, until another user is created
adminUser = User().save(adminUser, validate=False)
def _provisionAssetstore():
if not Assetstore().findOne({'name': 'assetstore'}):
if not settings.ISIC_ASSETSTORE_PATH.is_dir():
# This is expected to fail if the path is owned by root
settings.ISIC_ASSETSTORE_PATH.mkdir(parents=True)
Assetstore().createFilesystemAssetstore(
name='assetstore',
root=str(settings.ISIC_ASSETSTORE_PATH.resolve()),
)
def _provisionImages():
if not Group().findOne({'name': 'Dataset Contributors'}):
contributorsGroup = Group().createGroup(
name='Dataset Contributors',
creator=getAdminUser(),
description='Users that can create datasets',
public=True
)
Group().removeUser(contributorsGroup, getAdminUser())
reviewerGroup = Group().findOne({'name': 'Dataset QC Reviewers'})
if not reviewerGroup:
reviewerGroup = Group().createGroup(
name='Dataset QC Reviewers',
creator=getAdminUser(),
description='Users responsible for doing initial QC',
public=True
)
Group().removeUser(reviewerGroup, getAdminUser())
if not Collection().findOne({'name': 'Flagged Images'}):
flaggedCollection = Collection().createCollection(
name='Flagged Images',
creator=getAdminUser(),
description='Images that have been flagged for any reason',
public=False,
reuseExisting=False
)
flaggedCollection = Collection().setAccessList(
doc=flaggedCollection,
access={},
save=False
)
Collection().setGroupAccess(
doc=flaggedCollection,
group=reviewerGroup,
# TODO: make this a special access level
level=AccessType.READ,
save=True
)
imageCollection = Collection().createCollection(
name='Lesion Images',
creator=getAdminUser(),
description='All public lesion image datasets',
public=True,
reuseExisting=True
)
Collection().setAccessList(
doc=imageCollection,
access={},
save=True
)
def _provisionSegmentationGroups():
if not Group().findOne({'name': 'Segmentation Novices'}):
segmentationNovicesGroup = Group().createGroup(
name='Segmentation Novices',
creator=getAdminUser(),
description='Users able to tentatively segment lesion boundaries',
public=True
)
Group().removeUser(segmentationNovicesGroup, getAdminUser())
if not Group().findOne({'name': 'Segmentation Experts'}):
segmentationExpertsGroup = Group().createGroup(
name='Segmentation Experts',
creator=getAdminUser(),
description='Users able to definitively segment lesion boundaries',
public=True
)
Group().removeUser(segmentationExpertsGroup, getAdminUser())
def _provisionStudies():
studyAdminGroup = Group().findOne({'name': 'Study Administrators'})
if not studyAdminGroup:
studyAdminGroup = Group().createGroup(
name='Study Administrators',
creator=getAdminUser(),
description='Annotation study creators and administrators',
public=True
)
Group().removeUser(studyAdminGroup, getAdminUser())
studiesCollection = Collection().createCollection(
name='Annotation Studies',
creator=getAdminUser(),
description='Clinical feature annotation studies',
public=True,
reuseExisting=True
)
studiesCollection = Collection().setAccessList(
doc=studiesCollection,
access={},
save=False
)
Collection().setGroupAccess(
doc=studiesCollection,
group=studyAdminGroup,
# TODO: make this a special access level
level=AccessType.READ,
save=True
)
def _provisionTemporaryUploads():
uploadCollection = Collection().createCollection(
name='Temporary ZIP Uploads',
creator=getAdminUser(),
description='Temporary holding area for uploaded ZIP files',
public=False,
reuseExisting=True
)
uploadCollection = Collection().setAccessList(
doc=uploadCollection,
access={},
save=True
)
uploadFolder = Folder().createFolder(
name='Temporary ZIP Uploads',
parentType='collection',
parent=uploadCollection,
creator=getAdminUser(),
public=False,
reuseExisting=True
)
Folder().setAccessList(
doc=uploadFolder,
access={},
save=True
)
def provisionDatabase():
_setGirderSettings()
_setLargeImageSettings()
_provisionAdminUser()
_provisionAssetstore()
_provisionImages()
_provisionSegmentationGroups()
_provisionStudies()
_provisionTemporaryUploads()
| {
"content_hash": "91f017683b14771128b4690247f8ff8a",
"timestamp": "",
"source": "github",
"line_count": 234,
"max_line_length": 97,
"avg_line_length": 33.69230769230769,
"alnum_prop": 0.6433282597666159,
"repo_name": "ImageMarkup/isic-archive",
"id": "6b320db8f3850326bc6c592a9e304cc18954add0",
"size": "7884",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "isic_archive/provision_utility.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "8936"
},
{
"name": "Dockerfile",
"bytes": "838"
},
{
"name": "HTML",
"bytes": "56481"
},
{
"name": "JavaScript",
"bytes": "3778033"
},
{
"name": "Jinja",
"bytes": "6417"
},
{
"name": "Mako",
"bytes": "76622"
},
{
"name": "PEG.js",
"bytes": "2182"
},
{
"name": "Pug",
"bytes": "51086"
},
{
"name": "Python",
"bytes": "381336"
},
{
"name": "Shell",
"bytes": "30"
},
{
"name": "Stylus",
"bytes": "18670"
},
{
"name": "TeX",
"bytes": "50168"
},
{
"name": "Vue",
"bytes": "70286"
}
],
"symlink_target": ""
} |
import pytest
from Cauldron.utils.helpers import _Setting, api_not_required
from Cauldron.exc import CauldronAPINotImplemented
def test_setting_inverse():
"""Make a setting and test inverse setting."""
setting = _Setting("TESTSETTING", False)
inv = setting.inverse
assert not setting
assert inv
inv.off()
assert setting
assert not inv
def test_setting_lock():
"""Test a setting lock."""
lock = _Setting("TESTLOCK", False)
setting = _Setting("TESTSETTING", False, lock=lock)
setting.on()
assert setting
lock.on()
with pytest.raises(RuntimeError):
setting.off()
assert setting
lock.off()
setting.off()
assert not setting
def test_not_required():
"""Test the API not required decorator."""
@api_not_required
def some_not_required_method():
"""Documentation for some_not_required_method"""
pass
assert """Cauldron backends are not required to implement this function.""" in some_not_required_method.__doc__
assert """some_not_required_method""" in some_not_required_method.__doc__
with pytest.raises(CauldronAPINotImplemented):
some_not_required_method() | {
"content_hash": "f808e0a92306e75da060a0d01c4e8924",
"timestamp": "",
"source": "github",
"line_count": 43,
"max_line_length": 115,
"avg_line_length": 28.325581395348838,
"alnum_prop": 0.6576354679802956,
"repo_name": "alexrudy/Cauldron",
"id": "97d9d2845302658e9139335b6e95a30617e22c58",
"size": "1243",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "Cauldron/tests/test_utils.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "840330"
}
],
"symlink_target": ""
} |
import subprocess
import unittest
import testbase
import default_cluster
import util
import os
import constant
import config
import time
import telnetlib
import signal
class TestBasicOp(unittest.TestCase):
cluster = config.clusters[2]
@classmethod
def setUpClass(cls):
return 0
@classmethod
def tearDownClass(cls):
return 0
def setUp(self):
util.set_process_logfile_prefix( 'TestBasicOp_%s' % self._testMethodName )
self.conf_checker= default_cluster.initialize_starting_up_smr_before_redis(self.cluster)
self.assertIsNotNone(self.conf_checker, 'failed to initialize cluster')
def tearDown(self):
testbase.defaultTearDown(self)
def test_basic_op(self):
util.print_frame()
f = open("%s/test_basicop_output_redis" % constant.logdir, 'w')
p = util.exec_proc_async("../redis-%s" % constant.REDISVER,
"./runtest --accurate",
True, None, f, None)
ret = p.wait()
f.close()
self.assertEquals(0, ret)
def test_basic_op_smr(self):
util.print_frame()
f = open("%s/test_basicop_output_smr" % constant.logdir, 'w')
p = util.exec_proc_async("../redis-%s" % constant.REDISVER,
"./runtest_smr --accurate",
True, None, f, None)
ret = p.wait()
f.close()
self.assertEquals(0, ret)
def test_basic_op_gateway(self):
util.print_frame()
ip, port = util.get_rand_gateway(self.cluster)
f = open("%s/test_basicop_output_gw" % constant.logdir, 'w')
p = util.exec_proc_async("../redis-%s" % constant.REDISVER,
"./runtest_gw --accurate --gw-port "+str(port),
True, None, f, None)
ret = p.wait()
f.close()
self.assertEquals(0, ret)
def run_capi_server(self):
# run capi test server
_capi_server_conf = """
zookeeper 127.0.0.1:2181
cluster_name %s
port 6200
daemonize no
num_conn_per_gw 2
init_timeout_millis 10000
log_level INFO
log_file_prefix "capi_server"
max_fd 4096
conn_reconnect_millis 1000
zk_reconnect_millis 1000
zk_session_timeout_millis 10000
local_proxy_query_timeout_millis 10000
""" % self.cluster['cluster_name']
old_cwd = os.path.abspath( os.getcwd() )
os.chdir(util.capi_dir(0))
f = open('capi_server.conf', 'w')
f.write(_capi_server_conf)
f.close()
os.chdir(old_cwd)
if self.arch == 32:
cmd = "./%s capi_server.conf" % constant.CAPI32_TEST_SERVER
else:
cmd = "./%s capi_server.conf" % constant.CAPI_TEST_SERVER
capi_server = util.exec_proc_async(util.capi_dir(0),
cmd, True, None, subprocess.PIPE, None)
# ping check
while True:
try:
t = telnetlib.Telnet('127.0.0.1', 6200)
break
except:
time.sleep(1)
continue
t.write("ping\r\n")
t.read_until('+PONG\r\n')
t.close()
return capi_server
def stop_process(self, capi_server):
capi_server.send_signal(signal.SIGTERM)
capi_server.wait()
| {
"content_hash": "1bfa89e8840f7b2720df64148ae069c5",
"timestamp": "",
"source": "github",
"line_count": 115,
"max_line_length": 96,
"avg_line_length": 28.73913043478261,
"alnum_prop": 0.5697428139183056,
"repo_name": "cl9200/nbase-arc",
"id": "9a7772a524302402c24dd2f1485d5fbd55e5a2d0",
"size": "3885",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "integration_test/test_basic_op.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "1151"
},
{
"name": "C",
"bytes": "5284770"
},
{
"name": "C++",
"bytes": "349295"
},
{
"name": "CSS",
"bytes": "1647"
},
{
"name": "HTML",
"bytes": "394813"
},
{
"name": "Java",
"bytes": "2592189"
},
{
"name": "Lua",
"bytes": "11887"
},
{
"name": "M4",
"bytes": "62540"
},
{
"name": "Makefile",
"bytes": "183577"
},
{
"name": "Objective-C",
"bytes": "1114"
},
{
"name": "Perl",
"bytes": "171375"
},
{
"name": "Python",
"bytes": "812422"
},
{
"name": "Roff",
"bytes": "924986"
},
{
"name": "Ruby",
"bytes": "5126"
},
{
"name": "Shell",
"bytes": "267485"
},
{
"name": "Smarty",
"bytes": "1023"
},
{
"name": "Tcl",
"bytes": "940027"
},
{
"name": "XSLT",
"bytes": "303"
}
],
"symlink_target": ""
} |
from skbeam.fluorescence import * # noqa: F401, F403
| {
"content_hash": "2771c13be9d36f87ee4a12da33f6ced9",
"timestamp": "",
"source": "github",
"line_count": 1,
"max_line_length": 53,
"avg_line_length": 54,
"alnum_prop": 0.7407407407407407,
"repo_name": "Nikea/scikit-xray",
"id": "6f8506f998ce77fa06fcd2ec862b6d01b021611d",
"size": "93",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "skbeam/tests/test_fluorescence.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "28"
},
{
"name": "C",
"bytes": "18904"
},
{
"name": "Python",
"bytes": "653066"
},
{
"name": "Shell",
"bytes": "38"
}
],
"symlink_target": ""
} |
from django.db import models, migrations
import django.contrib.gis.db.models.fields
class Migration(migrations.Migration):
dependencies = []
operations = [
migrations.CreateModel(
name="Council",
fields=[
(
"council_id",
models.CharField(max_length=100, serialize=False, primary_key=True),
),
("council_type", models.CharField(max_length=10, blank=True)),
("mapit_id", models.CharField(max_length=100, blank=True)),
("name", models.CharField(max_length=255, blank=True)),
("email", models.EmailField(max_length=75, blank=True)),
("phone", models.CharField(max_length=100, blank=True)),
("website", models.URLField(blank=True)),
("postcode", models.CharField(max_length=100, null=True, blank=True)),
("address", models.TextField(null=True, blank=True)),
(
"location",
django.contrib.gis.db.models.fields.PointField(
srid=4326, null=True, blank=True
),
),
(
"area",
django.contrib.gis.db.models.fields.MultiPolygonField(
srid=4326, null=True, geography=True, blank=True
),
),
],
options={},
bases=(models.Model,),
)
]
| {
"content_hash": "d8d7922050cb2e43e84cf1a96831c30e",
"timestamp": "",
"source": "github",
"line_count": 41,
"max_line_length": 88,
"avg_line_length": 37.5609756097561,
"alnum_prop": 0.4831168831168831,
"repo_name": "DemocracyClub/UK-Polling-Stations",
"id": "3e1bac8f67cfa6f88c8920b067b013028a6ea89b",
"size": "1566",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "polling_stations/apps/councils/migrations/0001_initial.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "32"
},
{
"name": "HTML",
"bytes": "85540"
},
{
"name": "JavaScript",
"bytes": "3399"
},
{
"name": "Procfile",
"bytes": "49"
},
{
"name": "Python",
"bytes": "1111337"
},
{
"name": "SCSS",
"bytes": "5742"
}
],
"symlink_target": ""
} |
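For reference, the model this initial migration creates can be reconstructed directly from its field list; a sketch of the equivalent councils/models.py definition (a reconstruction only, the project's actual model may have since diverged):
# Reconstruction sketch of the Council model implied by the migration above.
from django.contrib.gis.db import models
class Council(models.Model):
    council_id = models.CharField(max_length=100, primary_key=True)
    council_type = models.CharField(max_length=10, blank=True)
    mapit_id = models.CharField(max_length=100, blank=True)
    name = models.CharField(max_length=255, blank=True)
    email = models.EmailField(max_length=75, blank=True)
    phone = models.CharField(max_length=100, blank=True)
    website = models.URLField(blank=True)
    postcode = models.CharField(max_length=100, null=True, blank=True)
    address = models.TextField(null=True, blank=True)
    location = models.PointField(srid=4326, null=True, blank=True)
    area = models.MultiPolygonField(srid=4326, geography=True, null=True, blank=True)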
import cProfile
import json
import scipy
import numpy as np
import icgauge
from icgauge import experiment_frameworks
print "Punctuation test framework:"
print " =================================== "
print " First test: dataset with punctuation"
print " =================================== "
print " Data: practice, test, toy dataset"
print " Features: all features"
print " Labels: original 7-point scale"
print " Model: logistic regression (classification)"
print
corr, alpha, conf_matrix, details = experiment_frameworks.experiment_features_iterated(
train_reader=icgauge.data_readers.punctuated_set,
assess_reader=None,
train_size=0.7,
phi_list=[icgauge.feature_extractors.all_features],
class_func=icgauge.label_transformers.identity_class_func,
train_func=icgauge.training_functions.fit_logistic_at,
score_func=scipy.stats.stats.pearsonr,
verbose=False,
iterations=10)
print "\n-- AFTER COMPLETION --"
print "Averaged correlation (95% CI): "
print np.round(np.mean(corr),2), "+/-", np.round(np.std(corr),2)
print "All correlations:"
print corr
print
print "Averaged Cronbach's alpha (95% CI): "
print np.round(np.mean(alpha),2), "+/-", np.round(np.std(alpha),2)
print "All alphas:"
print alpha
print
print "Confusion matrix:"
print conf_matrix
"""
print " =================================== "
print " Second test: dataset without punctuation"
print " =================================== "
print " Data: practice, test, toy dataset"
print " Features: all features"
print " Labels: original 7-point scale"
print " Model: logistic regression (classification)"
print
corr, alpha, conf_matrix, details = experiment_frameworks.experiment_features_iterated(
train_reader=icgauge.data_readers.unpunctuated_set,
assess_reader=None,
train_size=0.7,
phi_list=[icgauge.feature_extractors.all_features],
class_func=icgauge.label_transformers.identity_class_func,
train_func=icgauge.training_functions.fit_logistic_at,
score_func=scipy.stats.stats.pearsonr,
verbose=False,
iterations=10)
print "\n-- AFTER COMPLETION --"
print "Averaged correlation (95% CI): "
print np.round(np.mean(corr),2), "+/-", np.round(np.std(corr),2)
print "All correlations:"
print corr
print
print "Averaged Cronbach's alpha (95% CI): "
print np.round(np.mean(alpha),2), "+/-", np.round(np.std(alpha),2)
print "All alphas:"
print alpha
print
print "Confusion matrix:"
print conf_matrix
""" | {
"content_hash": "338378ef8a3b8f306899f6d05d81f28e",
"timestamp": "",
"source": "github",
"line_count": 83,
"max_line_length": 87,
"avg_line_length": 29.903614457831324,
"alnum_prop": 0.6772763900080581,
"repo_name": "ptoman/icgauge",
"id": "7bbcf00e0a2b273db913b12b45da1c4980a75a90",
"size": "2911",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "experiments/punctuation.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "12350022"
},
{
"name": "Python",
"bytes": "146771"
}
],
"symlink_target": ""
} |
from django.utils.translation import ugettext_lazy as _ # noqa
from horizon import exceptions
from horizon import forms
from horizon.utils import fields
from horizon.utils import validators
from horizon import workflows
from openstack_dashboard import api
AVAILABLE_PROTOCOLS = ('HTTP', 'HTTPS', 'TCP')
AVAILABLE_METHODS = ('ROUND_ROBIN', 'LEAST_CONNECTIONS', 'SOURCE_IP')
class AddPoolAction(workflows.Action):
name = forms.CharField(max_length=80, label=_("Name"))
description = forms.CharField(
initial="", required=False,
max_length=80, label=_("Description"))
# provider is optional because some LBaaS implementations do
# not support the service-type extension.
provider = forms.ChoiceField(label=_("Provider"), required=False)
subnet_id = forms.ChoiceField(label=_("Subnet"))
protocol = forms.ChoiceField(label=_("Protocol"))
lb_method = forms.ChoiceField(label=_("Load Balancing Method"))
admin_state_up = forms.BooleanField(label=_("Admin State"),
initial=True, required=False)
def __init__(self, request, *args, **kwargs):
super(AddPoolAction, self).__init__(request, *args, **kwargs)
tenant_id = request.user.tenant_id
subnet_id_choices = [('', _("Select a Subnet"))]
try:
networks = api.neutron.network_list_for_tenant(request, tenant_id)
except Exception:
exceptions.handle(request,
_('Unable to retrieve networks list.'))
networks = []
for n in networks:
for s in n['subnets']:
subnet_id_choices.append((s.id, s.cidr))
self.fields['subnet_id'].choices = subnet_id_choices
protocol_choices = [('', _("Select a Protocol"))]
[protocol_choices.append((p, p)) for p in AVAILABLE_PROTOCOLS]
self.fields['protocol'].choices = protocol_choices
lb_method_choices = [('', _("Select a Method"))]
[lb_method_choices.append((m, m)) for m in AVAILABLE_METHODS]
self.fields['lb_method'].choices = lb_method_choices
# provider choice
try:
if api.neutron.is_extension_supported(request, 'service-type'):
provider_list = api.neutron.provider_list(request)
providers = [p for p in provider_list
if p['service_type'] == 'LOADBALANCER']
else:
providers = None
except Exception:
exceptions.handle(request,
_('Unable to retrieve providers list.'))
providers = []
if providers:
default_providers = [p for p in providers if p.get('default')]
if default_providers:
default_provider = default_providers[0]['name']
else:
default_provider = None
provider_choices = [(p['name'], p['name']) for p in providers
if p['name'] != default_provider]
if default_provider:
provider_choices.insert(
0, (default_provider,
_("%s (default)") % default_provider))
else:
if providers is None:
msg = _("Provider for Load Balancer is not supported.")
else:
msg = _("No provider is available.")
provider_choices = [('', msg)]
self.fields['provider'].widget.attrs['readonly'] = True
self.fields['provider'].choices = provider_choices
class Meta:
name = _("Add New Pool")
permissions = ('openstack.services.network',)
help_text = _("Create Pool for current project.\n\n"
"Assign a name and description for the pool. "
"Choose one subnet where all members of this "
"pool must be on. "
"Select the protocol and load balancing method "
"for this pool. "
"Admin State is UP (checked) by default.")
class AddPoolStep(workflows.Step):
action_class = AddPoolAction
contributes = ("name", "description", "subnet_id", "provider",
"protocol", "lb_method", "admin_state_up")
def contribute(self, data, context):
context = super(AddPoolStep, self).contribute(data, context)
if data:
return context
class AddPool(workflows.Workflow):
slug = "addpool"
name = _("Add Pool")
finalize_button_name = _("Add")
success_message = _('Added pool "%s".')
failure_message = _('Unable to add pool "%s".')
success_url = "horizon:project:loadbalancers:index"
default_steps = (AddPoolStep,)
def format_status_message(self, message):
name = self.context.get('name')
return message % name
def handle(self, request, context):
try:
api.lbaas.pool_create(request, **context)
return True
except Exception:
return False
class AddVipAction(workflows.Action):
name = forms.CharField(max_length=80, label=_("Name"))
description = forms.CharField(
initial="", required=False,
max_length=80, label=_("Description"))
floatip_address = forms.ChoiceField(
label=_("VIP Address from Floating IPs"),
widget=forms.Select(attrs={'disabled': 'disabled'}),
required=False)
other_address = fields.IPField(required=False,
initial="",
version=fields.IPv4,
mask=False)
protocol_port = forms.IntegerField(label=_("Protocol Port"), min_value=1,
help_text=_("Enter an integer value "
"between 1 and 65535."),
validators=[validators.validate_port_range])
protocol = forms.ChoiceField(label=_("Protocol"))
session_persistence = forms.ChoiceField(
required=False, initial={}, label=_("Session Persistence"))
cookie_name = forms.CharField(
initial="", required=False,
max_length=80, label=_("Cookie Name"),
help_text=_("Required for APP_COOKIE persistence;"
" Ignored otherwise."))
connection_limit = forms.IntegerField(
required=False, min_value=-1, label=_("Connection Limit"),
help_text=_("Maximum number of connections allowed "
"for the VIP or '-1' if the limit is not set"))
admin_state_up = forms.BooleanField(
label=_("Admin State"), initial=True, required=False)
def __init__(self, request, *args, **kwargs):
super(AddVipAction, self).__init__(request, *args, **kwargs)
self.fields['other_address'].label = (
    _("Specify a free IP address from %s") % args[0]['subnet'])
protocol_choices = [('', _("Select a Protocol"))]
[protocol_choices.append((p, p)) for p in AVAILABLE_PROTOCOLS]
self.fields['protocol'].choices = protocol_choices
session_persistence_choices = [('', _("No Session Persistence"))]
for mode in ('SOURCE_IP', 'HTTP_COOKIE', 'APP_COOKIE'):
session_persistence_choices.append((mode, mode))
self.fields[
'session_persistence'].choices = session_persistence_choices
floatip_address_choices = [('', _("Currently Not Supported"))]
self.fields['floatip_address'].choices = floatip_address_choices
def clean(self):
cleaned_data = super(AddVipAction, self).clean()
if (cleaned_data.get('session_persistence') == 'APP_COOKIE' and
not cleaned_data.get('cookie_name')):
msg = _('Cookie name is required for APP_COOKIE persistence.')
self._errors['cookie_name'] = self.error_class([msg])
return cleaned_data
class Meta:
name = _("Specify VIP")
permissions = ('openstack.services.network',)
help_text = _("Create a VIP for this pool. "
"Assign a name and description for the VIP. "
"Specify an IP address and port for the VIP. "
"Choose the protocol and session persistence "
"method for the VIP."
"Specify the max connections allowed. "
"Admin State is UP (checked) by default.")
class AddVipStep(workflows.Step):
action_class = AddVipAction
depends_on = ("pool_id", "subnet")
contributes = ("name", "description", "floatip_address",
"other_address", "protocol_port", "protocol",
"session_persistence", "cookie_name",
"connection_limit", "admin_state_up")
def contribute(self, data, context):
context = super(AddVipStep, self).contribute(data, context)
return context
class AddVip(workflows.Workflow):
slug = "addvip"
name = _("Add VIP")
finalize_button_name = _("Add")
success_message = _('Added VIP "%s".')
failure_message = _('Unable to add VIP "%s".')
success_url = "horizon:project:loadbalancers:index"
default_steps = (AddVipStep,)
def format_status_message(self, message):
name = self.context.get('name')
return message % name
def handle(self, request, context):
if context['other_address'] == '':
context['address'] = context['floatip_address']
else:
if not context['floatip_address'] == '':
self.failure_message = _('Only one address can be specified. '
'Unable to add VIP "%s".')
return False
else:
context['address'] = context['other_address']
try:
pool = api.lbaas.pool_get(request, context['pool_id'])
context['subnet_id'] = pool['subnet_id']
except Exception:
context['subnet_id'] = None
self.failure_message = _('Unable to retrieve the specified pool. '
'Unable to add VIP "%s".')
return False
if context['session_persistence']:
stype = context['session_persistence']
if stype == 'APP_COOKIE':
cookie = context['cookie_name']
context['session_persistence'] = {'type': stype,
'cookie_name': cookie}
else:
context['session_persistence'] = {'type': stype}
else:
context['session_persistence'] = {}
try:
api.lbaas.vip_create(request, **context)
return True
except Exception:
return False
class AddMemberAction(workflows.Action):
pool_id = forms.ChoiceField(label=_("Pool"))
members = forms.MultipleChoiceField(
label=_("Member(s)"),
required=True,
initial=["default"],
widget=forms.CheckboxSelectMultiple(),
error_messages={'required':
_('At least one member must be specified')},
help_text=_("Select members for this pool "))
weight = forms.IntegerField(max_value=256, min_value=0, label=_("Weight"),
required=False,
help_text=_("Relative part of requests this "
"pool member serves compared to others"))
protocol_port = forms.IntegerField(label=_("Protocol Port"), min_value=1,
help_text=_("Enter an integer value "
"between 1 and 65535."),
validators=[validators.validate_port_range])
admin_state_up = forms.BooleanField(label=_("Admin State"),
initial=True, required=False)
def __init__(self, request, *args, **kwargs):
super(AddMemberAction, self).__init__(request, *args, **kwargs)
pool_id_choices = [('', _("Select a Pool"))]
try:
pools = api.lbaas.pools_get(request)
except Exception:
pools = []
exceptions.handle(request,
_('Unable to retrieve pools list.'))
pools = sorted(pools,
key=lambda pool: pool.name)
for p in pools:
pool_id_choices.append((p.id, p.name))
self.fields['pool_id'].choices = pool_id_choices
members_choices = []
try:
servers, has_more = api.nova.server_list(request)
except Exception:
servers = []
exceptions.handle(request,
_('Unable to retrieve instances list.'))
if len(servers) == 0:
self.fields['members'].label = _("No servers available. "
"Click Add to cancel.")
self.fields['members'].required = False
self.fields['members'].help_text = _("Select members "
"for this pool ")
self.fields['pool_id'].required = False
self.fields['protocol_port'].required = False
return
for m in servers:
members_choices.append((m.id, m.name))
self.fields['members'].choices = sorted(
members_choices,
key=lambda member: member[1])
class Meta:
name = _("Add New Member")
permissions = ('openstack.services.network',)
help_text = _("Add member to selected pool.\n\n"
"Choose one or more listed instances to be "
"added to the pool as member(s). "
"Assign a numeric weight for this member "
"Specify the port number the member(s) "
"operate on; e.g., 80.")
class AddMemberStep(workflows.Step):
action_class = AddMemberAction
contributes = ("pool_id", "members", "protocol_port", "weight",
"admin_state_up")
def contribute(self, data, context):
context = super(AddMemberStep, self).contribute(data, context)
return context
class AddMember(workflows.Workflow):
slug = "addmember"
name = _("Add Member")
finalize_button_name = _("Add")
success_message = _('Added member(s).')
failure_message = _('Unable to add member(s).')
success_url = "horizon:project:loadbalancers:index"
default_steps = (AddMemberStep,)
def handle(self, request, context):
for m in context['members']:
params = {'device_id': m}
try:
plist = api.neutron.port_list(request, **params)
except Exception:
return False
if plist:
context['address'] = plist[0].fixed_ips[0]['ip_address']
try:
context['member_id'] = api.lbaas.member_create(
request, **context).id
except Exception:
return False
return True
class AddMonitorAction(workflows.Action):
type = forms.ChoiceField(
label=_("Type"),
choices=[('ping', _('PING')),
('tcp', _('TCP')),
('http', _('HTTP')),
('https', _('HTTPS'))],
widget=forms.Select(attrs={
'class': 'switchable',
'data-slug': 'type'
}))
delay = forms.IntegerField(
min_value=1,
label=_("Delay"),
help_text=_("The minimum time in seconds between regular checks "
"of a member"))
timeout = forms.IntegerField(
min_value=1,
label=_("Timeout"),
help_text=_("The maximum time in seconds for a monitor to wait "
"for a reply"))
max_retries = forms.IntegerField(
max_value=10, min_value=1,
label=_("Max Retries (1~10)"),
help_text=_("Number of permissible failures before changing "
"the status of member to inactive"))
http_method = forms.ChoiceField(
initial="GET",
required=False,
choices=[('GET', _('GET'))],
label=_("HTTP Method"),
help_text=_("HTTP method used to check health status of a member"),
widget=forms.Select(attrs={
'class': 'switched',
'data-switch-on': 'type',
'data-type-http': _('HTTP Method'),
'data-type-https': _('HTTP Method')
}))
url_path = forms.CharField(
initial="/",
required=False,
max_length=80,
label=_("URL"),
widget=forms.TextInput(attrs={
'class': 'switched',
'data-switch-on': 'type',
'data-type-http': _('URL'),
'data-type-https': _('URL')
}))
expected_codes = forms.RegexField(
initial="200",
required=False,
max_length=80,
regex=r'^(\d{3}(\s*,\s*\d{3})*)$|^(\d{3}-\d{3})$',
label=_("Expected HTTP Status Codes"),
help_text=_("Expected code may be a single value (e.g. 200), "
"a list of values (e.g. 200, 202), "
"or range of values (e.g. 200-204)"),
widget=forms.TextInput(attrs={
'class': 'switched',
'data-switch-on': 'type',
'data-type-http': _('Expected HTTP Status Codes'),
'data-type-https': _('Expected HTTP Status Codes')
}))
admin_state_up = forms.BooleanField(label=_("Admin State"),
initial=True, required=False)
def __init__(self, request, *args, **kwargs):
super(AddMonitorAction, self).__init__(request, *args, **kwargs)
def clean(self):
cleaned_data = super(AddMonitorAction, self).clean()
type_opt = cleaned_data.get('type')
if type_opt in ['http', 'https']:
http_method_opt = cleaned_data.get('http_method')
url_path = cleaned_data.get('url_path')
expected_codes = cleaned_data.get('expected_codes')
if not http_method_opt:
msg = _('Please choose a HTTP method')
self._errors['http_method'] = self.error_class([msg])
if not url_path:
msg = _('Please specify an URL')
self._errors['url_path'] = self.error_class([msg])
if not expected_codes:
msg = _('Please enter a single value (e.g. 200), '
'a list of values (e.g. 200, 202), '
'or range of values (e.g. 200-204)')
self._errors['expected_codes'] = self.error_class([msg])
return cleaned_data
class Meta:
name = _("Add New Monitor")
permissions = ('openstack.services.network',)
help_text = _("Create a monitor template.\n\n"
"Select type of monitoring. "
"Specify delay, timeout, and retry limits "
"required by the monitor. "
"Specify method, URL path, and expected "
"HTTP codes upon success.")
class AddMonitorStep(workflows.Step):
action_class = AddMonitorAction
contributes = ("type", "delay", "timeout", "max_retries",
"http_method", "url_path", "expected_codes",
"admin_state_up")
def contribute(self, data, context):
context = super(AddMonitorStep, self).contribute(data, context)
if data:
return context
class AddMonitor(workflows.Workflow):
slug = "addmonitor"
name = _("Add Monitor")
finalize_button_name = _("Add")
success_message = _('Added monitor')
failure_message = _('Unable to add monitor')
success_url = "horizon:project:loadbalancers:index"
default_steps = (AddMonitorStep,)
def handle(self, request, context):
try:
context['monitor_id'] = api.lbaas.pool_health_monitor_create(
request, **context).get('id')
return True
except Exception:
exceptions.handle(request, _("Unable to add monitor."))
return False
class AddPMAssociationAction(workflows.Action):
monitor_id = forms.ChoiceField(label=_("Monitor"))
def __init__(self, request, *args, **kwargs):
super(AddPMAssociationAction, self).__init__(request, *args, **kwargs)
def populate_monitor_id_choices(self, request, context):
self.fields['monitor_id'].label = _("Select a monitor template "
"for %s" % context['pool_name'])
monitor_id_choices = [('', _("Select a Monitor"))]
try:
monitors = api.lbaas.pool_health_monitors_get(request)
for m in monitors:
if m.id not in context['pool_monitors']:
monitor_id_choices.append((m.id, m.id))
except Exception:
exceptions.handle(request,
_('Unable to retrieve monitors list.'))
self.fields['monitor_id'].choices = monitor_id_choices
return monitor_id_choices
class Meta:
name = _("Association Details")
permissions = ('openstack.services.network',)
help_text = _("Associate a health monitor with target pool.")
class AddPMAssociationStep(workflows.Step):
action_class = AddPMAssociationAction
depends_on = ("pool_id", "pool_name", "pool_monitors")
contributes = ("monitor_id",)
def contribute(self, data, context):
context = super(AddPMAssociationStep, self).contribute(data, context)
if data:
return context
class AddPMAssociation(workflows.Workflow):
slug = "addassociation"
name = _("Add Association")
finalize_button_name = _("Add")
success_message = _('Added association.')
failure_message = _('Unable to add association.')
success_url = "horizon:project:loadbalancers:index"
default_steps = (AddPMAssociationStep,)
def handle(self, request, context):
try:
context['monitor_id'] = api.lbaas.pool_monitor_association_create(
request, **context)
return True
except Exception:
exceptions.handle(request, _("Unable to add association."))
return False
class DeletePMAssociationAction(workflows.Action):
monitor_id = forms.ChoiceField(label=_("Monitor"))
def __init__(self, request, *args, **kwargs):
super(DeletePMAssociationAction, self).__init__(
request, *args, **kwargs)
def populate_monitor_id_choices(self, request, context):
self.fields['monitor_id'].label = (_("Select a health monitor of %s") %
context['pool_name'])
monitor_id_choices = [('', _("Select a Monitor"))]
try:
for m_id in context['pool_monitors']:
monitor_id_choices.append((m_id, m_id))
except Exception:
exceptions.handle(request,
_('Unable to retrieve monitors list.'))
self.fields['monitor_id'].choices = monitor_id_choices
return monitor_id_choices
class Meta:
name = _("Association Details")
permissions = ('openstack.services.network',)
help_text = _("Disassociate a health monitor from target pool. ")
class DeletePMAssociationStep(workflows.Step):
action_class = DeletePMAssociationAction
depends_on = ("pool_id", "pool_name", "pool_monitors")
contributes = ("monitor_id",)
def contribute(self, data, context):
context = super(DeletePMAssociationStep, self).contribute(
data, context)
if data:
return context
class DeletePMAssociation(workflows.Workflow):
slug = "deleteassociation"
name = _("Delete Association")
finalize_button_name = _("Delete")
success_message = _('Deleted association.')
failure_message = _('Unable to delete association.')
success_url = "horizon:project:loadbalancers:index"
default_steps = (DeletePMAssociationStep,)
def handle(self, request, context):
try:
context['monitor_id'] = api.lbaas.pool_monitor_association_delete(
request, **context)
return True
except Exception:
exceptions.handle(request, _("Unable to delete association."))
return False
| {
"content_hash": "0f6824e3af41feec88bc74355bbae826",
"timestamp": "",
"source": "github",
"line_count": 625,
"max_line_length": 79,
"avg_line_length": 39.2672,
"alnum_prop": 0.5511775731399234,
"repo_name": "Havate/havate-openstack",
"id": "356eb84aa693e8281673cdfdbc238db82390d4e6",
"size": "25209",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "proto-build/gui/horizon/Horizon_GUI/openstack_dashboard/dashboards/project/loadbalancers/workflows.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "407618"
},
{
"name": "HTML",
"bytes": "507406"
},
{
"name": "JavaScript",
"bytes": "25322"
},
{
"name": "Makefile",
"bytes": "6165"
},
{
"name": "Python",
"bytes": "21665856"
},
{
"name": "Shell",
"bytes": "62617"
}
],
"symlink_target": ""
} |
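Workflows such as AddPool above are normally exposed to users through Horizon's WorkflowView; a minimal sketch of the corresponding view class (the URL wiring and template path are assumptions, the dashboard's real views.py is not included in this excerpt):
# Illustrative sketch; only workflow_class is strictly required by WorkflowView.
from horizon import workflows as horizon_workflows
from openstack_dashboard.dashboards.project.loadbalancers import workflows as lb_workflows
class AddPoolView(horizon_workflows.WorkflowView):
    workflow_class = lb_workflows.AddPool
    template_name = "project/loadbalancers/addpool.html"  # assumed template path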
from __future__ import absolute_import, division, print_function, unicode_literals
import json
from botocore.exceptions import ClientError
from c7n.actions import ActionRegistry, AutoTagUser, BaseAction
from c7n.filters import CrossAccountAccessFilter, FilterRegistry, ValueFilter
import c7n.filters.vpc as net_filters
from c7n.manager import resources
from c7n.query import QueryResourceManager
from c7n.tags import RemoveTag, Tag, TagActionFilter, TagDelayedAction
from c7n.utils import get_retry, local_session, type_schema
filters = FilterRegistry('lambda.filters')
actions = ActionRegistry('lambda.actions')
filters.register('marked-for-op', TagActionFilter)
actions.register('auto-tag-user', AutoTagUser)
@resources.register('lambda')
class AWSLambda(QueryResourceManager):
class resource_type(object):
service = 'lambda'
type = 'function'
enum_spec = ('list_functions', 'Functions', None)
name = id = 'FunctionName'
filter_name = None
date = 'LastModified'
dimension = 'FunctionName'
filter_registry = filters
action_registry = actions
retry = staticmethod(get_retry(('Throttled',)))
def augment(self, functions):
resources = super(AWSLambda, self).augment(functions)
return filter(None, _lambda_function_tags(
self.get_model(),
resources,
self.session_factory,
self.executor_factory,
self.retry,
self.log))
def _lambda_function_tags(
model, functions, session_factory, executor_factory, retry, log):
""" Augment Lambda function with their respective tags
"""
def process_tags(function):
client = local_session(session_factory).client('lambda')
arn = function['FunctionArn']
try:
tag_dict = retry(client.list_tags, Resource=arn)['Tags']
except ClientError as e:
log.warning("Exception getting Lambda tags \n %s", e)
return None
tag_list = []
for k, v in tag_dict.items():
tag_list.append({'Key': k, 'Value': v})
function['Tags'] = tag_list
return function
with executor_factory(max_workers=2) as w:
return list(w.map(process_tags, functions))
def tag_function(session_factory, functions, tags, log):
client = local_session(session_factory).client('lambda')
tag_dict = {}
for t in tags:
tag_dict[t['Key']] = t['Value']
for f in functions:
arn = f['FunctionArn']
try:
client.tag_resource(Resource=arn, Tags=tag_dict)
except Exception as err:
log.exception(
'Exception tagging lambda function %s: %s',
f['FunctionName'], err)
continue
@filters.register('security-group')
class SecurityGroupFilter(net_filters.SecurityGroupFilter):
RelatedIdsExpression = "VpcConfig.SecurityGroupIds[]"
@filters.register('subnet')
class SubnetFilter(net_filters.SubnetFilter):
RelatedIdsExpression = "VpcConfig.SubnetIds[]"
filters.register('network-location', net_filters.NetworkLocation)
@filters.register('event-source')
class LambdaEventSource(ValueFilter):
# this uses iam policy, it should probably use
# event source mapping api
annotation_key = "c7n:EventSources"
schema = type_schema('event-source', rinherit=ValueFilter.schema)
permissions = ('lambda:GetPolicy',)
def process(self, resources, event=None):
def _augment(r):
if 'c7n:Policy' in r:
return
client = local_session(
self.manager.session_factory).client('lambda')
try:
r['c7n:Policy'] = client.get_policy(
FunctionName=r['FunctionName'])['Policy']
return r
except ClientError as e:
if e.response['Error']['Code'] == 'AccessDeniedException':
self.log.warning(
"Access denied getting policy lambda:%s",
r['FunctionName'])
self.log.debug("fetching policy for %d lambdas" % len(resources))
self.data['key'] = self.annotation_key
with self.executor_factory(max_workers=3) as w:
resources = filter(None, w.map(_augment, resources))
return super(LambdaEventSource, self).process(resources, event)
def __call__(self, r):
if 'c7n:Policy' not in r:
return False
sources = set()
data = json.loads(r['c7n:Policy'])
for s in data.get('Statement', ()):
if s['Effect'] != 'Allow':
continue
if 'Service' in s['Principal']:
sources.add(s['Principal']['Service'])
if sources:
r[self.annotation_key] = list(sources)
return self.match(r)
@filters.register('cross-account')
class LambdaCrossAccountAccessFilter(CrossAccountAccessFilter):
"""Filters lambda functions with cross-account permissions
The whitelist parameter can be used to prevent certain accounts
from being included in the results (essentially stating that these
accounts permissions are allowed to exist)
This can be useful when combining this filter with the delete action.
:example:
.. code-block:: yaml
policies:
- name: lambda-cross-account
resource: lambda
filters:
- type: cross-account
whitelist:
- 'IAM-Policy-Cross-Account-Access'
"""
permissions = ('lambda:GetPolicy',)
def process(self, resources, event=None):
def _augment(r):
client = local_session(
self.manager.session_factory).client('lambda')
try:
r['Policy'] = client.get_policy(
FunctionName=r['FunctionName'])['Policy']
return r
except ClientError as e:
if e.response['Error']['Code'] == 'AccessDeniedException':
self.log.warning(
"Access denied getting policy lambda:%s",
r['FunctionName'])
self.log.debug("fetching policy for %d lambdas" % len(resources))
with self.executor_factory(max_workers=3) as w:
resources = filter(None, w.map(_augment, resources))
return super(LambdaCrossAccountAccessFilter, self).process(
resources, event)
@actions.register('mark-for-op')
class TagDelayedAction(TagDelayedAction):
"""Action to specify an action to occur at a later date
:example:
.. code-block:: yaml
policies:
- name: lambda-delete-unused
resource: lambda
filters:
- "tag:custodian_cleanup": absent
actions:
- type: mark-for-op
tag: custodian_cleanup
msg: "Unused lambda"
op: delete
days: 7
"""
permissions = ('lambda:TagResource',)
def process_resource_set(self, functions, tags):
tag_function(self.manager.session_factory, functions, tags, self.log)
@actions.register('tag')
class Tag(Tag):
"""Action to add tag(s) to Lambda Function(s)
:example:
.. code-block:: yaml
policies:
- name: lambda-add-owner-tag
resource: lambda
filters:
- "tag:OwnerName": missing
actions:
- type: tag
key: OwnerName
value: OwnerName
"""
permissions = ('lambda:TagResource',)
def process_resource_set(self, functions, tags):
tag_function(self.manager.session_factory, functions, tags, self.log)
@actions.register('remove-tag')
class RemoveTag(RemoveTag):
"""Action to remove tag(s) from Lambda Function(s)
:example:
.. code-block:: yaml
policies:
- name: lambda-remove-old-tag
resource: lambda
filters:
- "tag:OldTagKey": present
actions:
- type: remove-tag
tags: [OldTagKey1, OldTagKey2]
"""
permissions = ('lambda:UntagResource',)
def process_resource_set(self, functions, tag_keys):
client = local_session(self.manager.session_factory).client('lambda')
for f in functions:
arn = f['FunctionArn']
client.untag_resource(Resource=arn, TagKeys=tag_keys)
@actions.register('delete')
class Delete(BaseAction):
"""Delete a lambda function (including aliases and older versions).
:example:
.. code-block:: yaml
policies:
- name: lambda-delete-dotnet-functions
resource: lambda
filters:
- Runtime: dotnetcore1.0
actions:
- delete
"""
schema = type_schema('delete')
permissions = ("lambda:DeleteFunction",)
def process(self, functions):
client = local_session(self.manager.session_factory).client('lambda')
for function in functions:
try:
client.delete_function(FunctionName=function['FunctionName'])
except ClientError as e:
if e.response['Error']['Code'] == "ResourceNotFoundException":
continue
raise
self.log.debug("Deleted %d functions", len(functions))
| {
"content_hash": "fa6a60adae0352b19ebbf45324a3738c",
"timestamp": "",
"source": "github",
"line_count": 303,
"max_line_length": 82,
"avg_line_length": 31.70957095709571,
"alnum_prop": 0.5868026644462948,
"repo_name": "VeritasOS/cloud-custodian",
"id": "1f178f7238979553dd15dd2dfd0b80df73c6b00a",
"size": "10193",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "c7n/resources/awslambda.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Makefile",
"bytes": "1247"
},
{
"name": "Python",
"bytes": "1492218"
}
],
"symlink_target": ""
} |
from sqlobject import *
from sqlobject.tests.dbtest import *
########################################
## String ID test
########################################
class SOStringID(SQLObject):
class sqlmeta(sqlmeta):
table = 'so_string_id'
idType = str
val = StringCol(alternateID=True)
mysqlCreate = """
CREATE TABLE IF NOT EXISTS so_string_id (
id VARCHAR(50) PRIMARY KEY,
val TEXT
)
"""
postgresCreate = """
CREATE TABLE so_string_id (
id VARCHAR(50) PRIMARY KEY,
val TEXT
)
"""
sybaseCreate = """
CREATE TABLE so_string_id (
id VARCHAR(50) UNIQUE,
val VARCHAR(50) NULL
)
"""
firebirdCreate = """
CREATE TABLE so_string_id (
id VARCHAR(50) NOT NULL PRIMARY KEY,
val BLOB SUB_TYPE TEXT
)
"""
mssqlCreate = """
CREATE TABLE so_string_id (
id VARCHAR(50) PRIMARY KEY,
val varchar(4000)
)
"""
sqliteCreate = postgresCreate
mysqlDrop = """
DROP TABLE IF EXISTS so_string_id
"""
postgresDrop = """
DROP TABLE so_string_id
"""
sqliteDrop = postgresDrop
firebirdDrop = postgresDrop
mssqlDrop = postgresDrop
def test_stringID():
setupClass(SOStringID)
t1 = SOStringID(id='hey', val='whatever')
t2 = SOStringID.byVal('whatever')
assert t1 == t2
assert t1.val == t2.val
assert t1.val == 'whatever'
t1 = SOStringID(id='you', val='nowhere')
t2 = SOStringID.get('you')
assert t1 == t2
assert t1.val == t2.val
assert t1.val == 'nowhere'
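# Illustrative sketch (not part of the original test module): alternateID=True on a
# column makes SQLObject generate a by<ColumnName>() classmethod, which is why
# SOStringID.byVal() is available above. A hypothetical class with a column named
# "code" would get a byCode() lookup the same way; it is not exercised by the tests.
class SOProductCode(SQLObject):
    class sqlmeta(sqlmeta):
        table = 'so_product_code'
        idType = str
    code = StringCol(alternateID=True)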
| {
"content_hash": "cd941ddd426c863f3bc619b583be45ac",
"timestamp": "",
"source": "github",
"line_count": 76,
"max_line_length": 45,
"avg_line_length": 20.842105263157894,
"alnum_prop": 0.5587121212121212,
"repo_name": "lightcode/SeriesWatcher",
"id": "05891adc233202ae6d7b3870d7f0f659895afd37",
"size": "1584",
"binary": false,
"copies": "10",
"ref": "refs/heads/master",
"path": "serieswatcher/sqlobject/tests/test_string_id.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "2594"
},
{
"name": "JavaScript",
"bytes": "1771"
},
{
"name": "Python",
"bytes": "1561181"
},
{
"name": "R",
"bytes": "2748"
}
],
"symlink_target": ""
} |
"""Tests for API client and approvals-related API calls."""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
import threading
import time
from absl import app
from grr_response_core.lib.util import compatibility
from grr_response_server.gui import api_auth_manager
from grr_response_server.gui import api_call_router_with_approval_checks
from grr_response_server.gui import api_integration_test_lib
from grr.test_lib import hunt_test_lib
from grr.test_lib import test_lib
class ApiClientLibApprovalsTest(api_integration_test_lib.ApiIntegrationTest,
hunt_test_lib.StandardHuntTestMixin):
def setUp(self):
super(ApiClientLibApprovalsTest, self).setUp()
cls = api_call_router_with_approval_checks.ApiCallRouterWithApprovalChecks
cls.ClearCache()
config_overrider = test_lib.ConfigOverrider(
{"API.DefaultRouter": compatibility.GetName(cls)})
config_overrider.Start()
self.addCleanup(config_overrider.Stop)
# Force creation of new APIAuthorizationManager, so that configuration
# changes are picked up.
api_auth_manager.InitializeApiAuthManager()
def testCreateClientApproval(self):
client_id = self.SetupClient(0)
self.CreateUser("foo")
approval = self.api.Client(client_id).CreateApproval(
reason="blah", notified_users=[u"foo"])
self.assertEqual(approval.client_id, client_id)
self.assertEqual(approval.data.subject.client_id, client_id)
self.assertEqual(approval.data.reason, "blah")
self.assertFalse(approval.data.is_valid)
def testWaitUntilClientApprovalValid(self):
client_id = self.SetupClient(0)
self.CreateUser("foo")
approval = self.api.Client(client_id).CreateApproval(
reason="blah", notified_users=[u"foo"])
self.assertFalse(approval.data.is_valid)
def ProcessApproval():
time.sleep(1)
self.GrantClientApproval(
client_id,
requestor=self.token.username,
approval_id=approval.approval_id,
approver=u"foo")
thread = threading.Thread(name="ProcessApprover", target=ProcessApproval)
thread.start()
try:
result_approval = approval.WaitUntilValid()
self.assertTrue(result_approval.data.is_valid)
finally:
thread.join()
def testCreateHuntApproval(self):
h_id = self.StartHunt()
self.CreateUser("foo")
approval = self.api.Hunt(h_id).CreateApproval(
reason="blah", notified_users=[u"foo"])
self.assertEqual(approval.hunt_id, h_id)
self.assertEqual(approval.data.subject.hunt_id, h_id)
self.assertEqual(approval.data.reason, "blah")
self.assertFalse(approval.data.is_valid)
def testWaitUntilHuntApprovalValid(self):
self.CreateUser("approver")
h_id = self.StartHunt()
approval = self.api.Hunt(h_id).CreateApproval(
reason="blah", notified_users=[u"approver"])
self.assertFalse(approval.data.is_valid)
def ProcessApproval():
time.sleep(1)
self.GrantHuntApproval(
h_id,
requestor=self.token.username,
approval_id=approval.approval_id,
approver=u"approver")
thread = threading.Thread(name="HuntApprover", target=ProcessApproval)
thread.start()
try:
result_approval = approval.WaitUntilValid()
self.assertTrue(result_approval.data.is_valid)
finally:
thread.join()
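# Illustrative sketch (not part of the original test module): both WaitUntilValid tests
# above follow the same pattern - a worker thread grants the approval after a short
# delay while the main thread blocks until the approval becomes valid. A generic,
# standalone version of that poll-until-ready idiom (names here are hypothetical):
def wait_until(predicate, timeout=10.0, interval=0.5):
  """Polls predicate() until it returns a truthy value or the timeout expires."""
  deadline = time.time() + timeout
  while time.time() < deadline:
    if predicate():
      return True
    time.sleep(interval)
  return False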
def main(argv):
test_lib.main(argv)
if __name__ == "__main__":
app.run(main)
| {
"content_hash": "e14b13363504fb4a55ef62c6a6d5e7c4",
"timestamp": "",
"source": "github",
"line_count": 114,
"max_line_length": 78,
"avg_line_length": 31.017543859649123,
"alnum_prop": 0.7010746606334841,
"repo_name": "dunkhong/grr",
"id": "7eee8358b8e1b75c71f1e125f7047ebc21c83991",
"size": "3558",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "grr/server/grr_response_server/gui/api_integration_tests/approvals_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Assembly",
"bytes": "227"
},
{
"name": "Batchfile",
"bytes": "882"
},
{
"name": "C",
"bytes": "11321"
},
{
"name": "C++",
"bytes": "54535"
},
{
"name": "CSS",
"bytes": "36745"
},
{
"name": "Dockerfile",
"bytes": "1822"
},
{
"name": "HCL",
"bytes": "8451"
},
{
"name": "HTML",
"bytes": "193751"
},
{
"name": "JavaScript",
"bytes": "12795"
},
{
"name": "Jupyter Notebook",
"bytes": "199190"
},
{
"name": "Makefile",
"bytes": "3139"
},
{
"name": "PowerShell",
"bytes": "1984"
},
{
"name": "Python",
"bytes": "7430923"
},
{
"name": "Roff",
"bytes": "444"
},
{
"name": "Shell",
"bytes": "49155"
},
{
"name": "Standard ML",
"bytes": "8172"
},
{
"name": "TSQL",
"bytes": "10560"
},
{
"name": "TypeScript",
"bytes": "56756"
}
],
"symlink_target": ""
} |
import time
import socket
from heat.openstack.common import log as logging
import heat.engine.resources.cloudmanager.util.conf_util as conf_util
from heat.engine.resources.cloudmanager.util.subnet_manager import SubnetManager
from heat.engine.resources.cloudmanager.exception import *
from heat.engine.resources.cloudmanager.environmentinfo import *
import heat.engine.resources.cloudmanager.proxy_manager as proxy_manager
import heat.engine.resources.cloudmanager.constant as constant
from hws_util import *
from hws_cloud_info_persist import *
import pdb
LOG = logging.getLogger(__name__)
SUBNET_GATEWAY_TAIL_IP = "1"
VPN_TAIL_IP = "254"
CASCADED_TAIL_IP = "4"
ROOT_VOLUME_TYPE = 'SATA'
class HwsCascadedInstaller(object):
def __init__(self, cloud_params):
self._init_params(cloud_params)
self._read_env()
self._read_install_info()
start_hws_gateway(self.cascading_api_ip, constant.Cascading.ROOT,
constant.Cascading.ROOT_PWD)
def _init_params(self, cloud_params):
self.cloud_params = cloud_params
self._read_default_conf()
self.cloud_id = "@".join(["HWS", self.cloud_params['azname']])
project_info = self.cloud_params["project_info"]
ak = project_info["access_key"]
sk = project_info["secret_key"]
region = project_info["region"]
project_id = project_info["project_id"]
port = self.default_port
host = self.default_host
protocol = self.default_protocol
self.installer = HwsInstaller(ak, sk, region, protocol, host, port, project_id)
self.cascaded_image_id = self.installer.get_image_id(name=self.default_cascaded_image_name)
self.cascaded_flavor = self.default_cascaded_flavor
self.vpn_image_id = self.installer.get_image_id(name=self.default_vpn_image_name)
self.vpn_flavor = self.default_vpn_flavor
self.availability_zone = self.cloud_params["project_info"]["availability_zone"]
self.install_data_handler = \
HwsCloudInfoPersist(constant.HwsConstant.INSTALL_INFO_FILE, self.cloud_id)
self.cloud_info_handler = \
HwsCloudInfoPersist(constant.HwsConstant.CLOUD_INFO_FILE, self.cloud_id)
def _read_env(self):
try:
env_info = conf_util.read_conf(constant.Cascading.ENV_FILE)
self.env = env_info["env"]
self.cascading_api_ip = env_info["cascading_api_ip"]
self.cascading_domain = env_info["cascading_domain"]
self.cascading_vpn_ip = env_info["local_vpn_ip"]
self.cascading_vpn_public_gw = env_info["local_vpn_public_gw"]
self.cascading_eip = env_info["cascading_eip"]
self.cascading_api_subnet = env_info["local_api_subnet"]
self.cascading_vpn_api_ip = env_info["local_vpn_api_ip"]
self.cascading_tunnel_subnet = env_info["local_tunnel_subnet"]
self.cascading_vpn_tunnel_ip = env_info["local_vpn_tunnel_ip"]
self.existed_cascaded = env_info["existed_cascaded"]
except IOError as e:
error = "read file = %s error" % constant.Cascading.ENV_FILE
LOG.error(e)
raise ReadEnvironmentInfoFailure(error = error)
except KeyError as e:
error = "read key = %s error in file = %s" % (e.message, constant.Cascading.ENV_FILE )
LOG.error(error)
raise ReadEnvironmentInfoFailure(error = error)
def _read_default_conf(self):
try:
self.default_params = conf_util.read_conf(constant.Cascading.HWS_CONF_FILE)
self.default_protocol = self.default_params["project_info"]["protocol"]
self.default_port = self.default_params["project_info"]["port"]
self.default_host = self.default_params["project_info"]["host"]
image_info = self.default_params["image"]
self.default_cascaded_image_name = image_info["cascaded_image"]
self.default_cascaded_flavor = image_info["cascaded_flavor"]
self.default_vpn_image_name = image_info["vpn_image"]
self.default_vpn_flavor = image_info["vpn_flavor"]
network = self.default_params["network"]
self.default_vpc_cidr = network["vpc_cidr"]
self.default_external_api_cidr = network["external_api_cidr"]
self.default_tunnel_bearing_cidr = network["tunnel_bearing_cidr"]
self.default_internal_base_cidr = network["internal_base_cidr"]
self.default_debug_cidr = network["debug_cidr"]
except IOError as e:
error = "read file = %s error" % constant.Cascading.HWS_CONF_FILE
LOG.error(e)
raise ReadEnvironmentInfoFailure(error = error)
except KeyError as e:
error = "read key = %s error in file = %s" % (e.message, constant.Cascading.HWS_CONF_FILE)
LOG.error(error)
raise ReadEnvironmentInfoFailure(error = error)
def _create_vpc(self):
name = self.cloud_params["azname"]+"_vpc"
cidr = self.vpc_cidr
if self.vpc_id is None:
self.vpc_id = self.installer.create_vpc(name, cidr)
try:
if self.security_group_id is None:
security_group_id = self.installer.get_security_group(self.vpc_id)
self.installer.create_security_group_rule(
security_group_id, "ingress", "IPv4")
self.security_group_id = security_group_id
finally:
self.install_data_handler.write_vpc_info(self.vpc_id, name, cidr, self.security_group_id)
def _delete_vpc(self):
if self.vpc_id:
self.installer.delete_vpc(self.vpc_id)
def _create_subnet(self):
az = self.availability_zone
self.external_api_gateway = self._alloc_gateway_ip(self.external_api_cidr)
tunnel_bearing_gateway = self._alloc_gateway_ip(self.tunnel_bearing_cidr)
internal_base_gateway = self._alloc_gateway_ip(self.internal_base_cidr)
debug_gateway = self._alloc_gateway_ip(self.debug_cidr)
external_api_info = {
"id": None
}
tunnel_bearing_info = {
"id": None
}
internal_base_info = {
"id": None
}
debug_info = {
"id": None
}
try:
if self.external_api_id is None:
self.external_api_id = self.installer.create_subnet("external_api",
self.external_api_cidr, az, self.external_api_gateway,
self.vpc_id)
if self.tunnel_bearing_id is None:
self.tunnel_bearing_id = self.installer.create_subnet("tunnel_bearing",
self.tunnel_bearing_cidr, az, tunnel_bearing_gateway,
self.vpc_id)
if self.internal_base_id is None:
self.internal_base_id = self.installer.create_subnet("internal_base",
self.internal_base_cidr, az, internal_base_gateway,
self.vpc_id)
if self.debug_id is None:
self.debug_id = self.installer.create_subnet("debug",
self.debug_cidr, az, debug_gateway,
self.vpc_id)
external_api_info = {
"id": self.external_api_id
}
tunnel_bearing_info = {
"id": self.tunnel_bearing_id
}
internal_base_info = {
"id": self.internal_base_id
}
debug_info = {
"id": self.debug_id
}
finally:
self.install_data_handler.write_subnets_info(external_api_info, tunnel_bearing_info, internal_base_info, debug_info)
def _delete_subnet(self):
if self.external_api_id:
self.installer.delete_subnet(self.vpc_id, self.external_api_id)
if self.tunnel_bearing_id:
self.installer.delete_subnet(self.vpc_id, self.tunnel_bearing_id)
if self.internal_base_id:
self.installer.delete_subnet(self.vpc_id, self.internal_base_id)
if self.debug_id:
self.installer.delete_subnet(self.vpc_id, self.debug_id)
@staticmethod
def _alloc_gateway_ip(cidr):
ip_list = cidr.split(".")
gateway_ip = ".".join([ip_list[0], ip_list[1], ip_list[2], SUBNET_GATEWAY_TAIL_IP])
return gateway_ip
@staticmethod
def _alloc_vpn_ip(cidr):
ip_list = cidr.split(".")
vpn_ip = ".".join([ip_list[0], ip_list[1], ip_list[2], VPN_TAIL_IP])
return vpn_ip
@staticmethod
def _alloc_cascaded_ip(cidr, tail_ip):
ip_list = cidr.split(".")
cascaded_ip = ".".join([ip_list[0], ip_list[1], ip_list[2], tail_ip])
return cascaded_ip
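    # Illustrative sketch (not part of the original module): the three helpers above
    # derive addresses by replacing the last dot-separated field of the subnet CIDR's
    # network address, so a CIDR of "10.10.20.0/24" yields gateway "10.10.20.1",
    # VPN "10.10.20.254" and cascaded "10.10.20.4"; the prefix length is ignored.
    # The demo method below is added for illustration only and is never called.
    @staticmethod
    def _demo_ip_allocation():
        cidr = "10.10.20.0/24"
        assert HwsCascadedInstaller._alloc_gateway_ip(cidr) == "10.10.20.1"
        assert HwsCascadedInstaller._alloc_vpn_ip(cidr) == "10.10.20.254"
        assert HwsCascadedInstaller._alloc_cascaded_ip(cidr, CASCADED_TAIL_IP) == "10.10.20.4"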
def _alloc_vpn_public_ip(self):
if self.vpn_public_ip_id is None:
public_ip_name = self.cloud_params["azname"]+"_vpn_public_ip"
result = self.installer.alloc_public_ip(public_ip_name)
self.vpn_public_ip = result["public_ip_address"]
self.vpn_public_ip_id = result["id"]
self.install_data_handler.write_public_ip_info(
self.vpn_public_ip,
self.vpn_public_ip_id
)
def _alloc_cascaded_public_ip(self):
if self.cascaded_public_ip_id is None:
public_ip_name = self.cloud_params["azname"]+"_cascaded_public_ip"
result = self.installer.alloc_public_ip(public_ip_name)
self.cascaded_public_ip = result["public_ip_address"]
self.cascaded_public_ip_id = result["id"]
self.install_data_handler.write_public_ip_info(
self.vpn_public_ip,
self.vpn_public_ip_id,
self.cascaded_public_ip,
self.cascaded_public_ip_id
)
def _release_public_ip(self):
self.installer.release_public_ip(self.vpn_public_ip_id)
self.installer.release_public_ip(self.cascaded_public_ip_id)
def cloud_preinstall(self):
self._cloud_preinstall()
def _cloud_preinstall(self):
self._alloc_subnets_and_ips()
def _alloc_subnets_and_ips(self):
if self.vpc_cidr is None:
network = self.cloud_params["network"]
if network:
self.vpc_cidr = network["vpc_cidr"]
self.external_api_cidr = network["external_api_cidr"]
self.tunnel_bearing_cidr = network["tunnel_bearing_cidr"]
self.internal_base_cidr = network["internal_base_cidr"]
self.debug_cidr = network["debug_cidr"]
else:
self.vpc_cidr = self.default_vpc_cidr
self.debug_cidr = self.default_debug_cidr
self.internal_base_cidr = self.default_internal_base_cidr
subnet_manager = SubnetManager()
                subnet_pair = subnet_manager.distribute_subnet_pair(
                    self.default_external_api_cidr,
                    self.default_tunnel_bearing_cidr,
                    constant.HwsConstant.INSTALL_INFO_FILE)
self.external_api_cidr = subnet_pair["external_api_cidr"]
self.tunnel_bearing_cidr = subnet_pair["tunnel_bearing_cidr"]
self.install_data_handler.write_subnets_cidr(self.vpc_cidr,
self.external_api_cidr,
self.tunnel_bearing_cidr,
self.internal_base_cidr,
self.debug_cidr )
self.cascaded_internal_base_ip = self._alloc_cascaded_ip(self.internal_base_cidr, "12")
self.cascaded_tunnel_bearing_ip = self._alloc_cascaded_ip(self.tunnel_bearing_cidr, "4")
self.cascaded_external_api_ip = self._alloc_cascaded_ip(self.external_api_cidr, "4")
self.cascaded_debug_ip = self._alloc_cascaded_ip(self.debug_cidr, "4")
def cloud_preuninstall(self):
pass
def _install_network(self):
self._create_vpc()
self._create_subnet()
def _uninstall_network(self):
self._delete_subnet()
self._delete_vpc()
def cloud_postinstall(self):
pass
def cloud_postuninstall(self):
#pdb.set_trace()
subnet_manager = SubnetManager()
subnet_manager.release_subnet_pair(self.default_tunnel_bearing_cidr, constant.HwsConstant.INSTALL_INFO_FILE)
self.install_data_handler.delete_cloud_info()
self.cloud_info_handler.delete_cloud_info()
def cloud_install(self):
self._cloud_install()
def _cloud_install(self):
self._install_proxy()
self._install_network()
vpn_info = self.cloud_params["vpn_info"]
if vpn_info and vpn_info["exist"] == "true":
self._read_exist_vpn_info()
else:
self._install_vpn()
self._install_cascaded()
def _read_exist_vpn_info(self):
vpn_info = self.cloud_params["vpn_info"]
        self.vpn_public_ip = vpn_info["public_ip"]
        self.vpn_external_api_ip = vpn_info["external_api_ip"]
self.vpn_tunnel_bearing_ip = vpn_info["tunnel_bearing_ip"]
def cloud_uninstall(self):
self._cloud_uninstall()
pass
def _cloud_uninstall(self):
self.uninstall_cascaded()
self.uninstall_vpn()
if self.delete_cascaded_job_id:
self.installer.block_until_delete_resource_success(self.delete_cascaded_job_id)
if self.delete_vpn_job_id:
self.installer.block_until_delete_resource_success(self.delete_vpn_job_id)
self._uninstall_network()
def _install_proxy(self):
if self.proxy_info is None:
self.proxy_info = proxy_manager.distribute_proxy()
self.install_data_handler.write_proxy(self.proxy_info)
def _install_cascaded(self):
nics = [{"subnet_id": self.debug_id,
"ip_address": self.cascaded_debug_ip}]
security_groups = [self.security_group_id]
server_name = self.cloud_params["azname"]+"_cascaded"
try:
if self.cascaded_server_id is None:
self.cascaded_server_job_id = self.installer.create_vm(self.cascaded_image_id,
self.cascaded_flavor,
server_name, self.vpc_id,
nics,ROOT_VOLUME_TYPE,
self.availability_zone,
adminPass = constant.HwsConstant.ROOT_PWD,
security_groups = security_groups)
self.cascaded_server_id = self.installer.block_until_create_vm_success(self.cascaded_server_job_id)
self._create_cascaded_nics()
self._modify_cascaded_external_api()
finally:
self.install_data_handler.write_cascaded_info(
self.cascaded_server_id,self.cascaded_public_ip,
self.cascaded_external_api_ip,self.cascaded_tunnel_bearing_ip,
self.tunnel_bearing_nic_id, self.external_api_nic_id,
self.internal_base_nic_id, self.port_id_bind_public_ip)
if self.vpn_server_id is None:
self.vpn_server_id = self.installer.block_until_create_vm_success(self.vpn_server_job_id)
self.unbound_vpn_ip_mac()
self.install_data_handler.write_vpn(
self.vpn_server_id, self.vpn_public_ip,
self.vpn_external_api_ip, self.vpn_tunnel_bearing_ip
)
LOG.info("install cascaded success.")
def unbound_vpn_ip_mac(self):
nics = self.installer.get_all_nics(self.vpn_server_id)
for nic in nics:
port_id = nic["port_id"]
mac_address = nic["mac_addr"]
self.installer.unbound_ip_mac(port_id, mac_address)
def _create_cascaded_nics(self):
        # NICs must be added one at a time to preserve the correct ordering
security_groups = [{"id":self.security_group_id}]
if self.internal_base_nic_id is None:
job_id = self.installer.add_nics(self.cascaded_server_id, self.internal_base_id,
security_groups, self.cascaded_internal_base_ip)
self.internal_base_nic_id = self.installer.block_until_create_nic_success(job_id)
if self.external_api_nic_id is None:
job_id = self.installer.add_nics(self.cascaded_server_id, self.external_api_id,
security_groups, self.cascaded_external_api_ip)
self.external_api_nic_id = self.installer.block_until_create_nic_success(job_id)
if self.tunnel_bearing_nic_id is None:
job_id = self.installer.add_nics(self.cascaded_server_id, self.tunnel_bearing_id,
security_groups, self.cascaded_tunnel_bearing_ip)
self.tunnel_bearing_nic_id = self.installer.block_until_create_nic_success(job_id)
if self.port_id_bind_public_ip is None:
#pdb.set_trace()
external_api_port_id = self.installer.get_external_api_port_id(
self.cascaded_server_id, self.external_api_nic_id)
self._alloc_cascaded_public_ip()
self.installer.bind_public_ip(self.cascaded_public_ip_id, external_api_port_id)
self.port_id_bind_public_ip = external_api_port_id
#pdb.set_trace()
self.installer.reboot(self.cascaded_server_id, "SOFT")
def _modify_cascaded_external_api(self):
#ssh to vpn, then ssh to cascaded through vpn tunnel_bearing_ip
        self.cascaded_domain = self._distribute_cloud_domain(
            self.cloud_params["project_info"]['region'], self.cloud_params['azname'], "--hws")
modify_cascaded_api_domain_cmd = 'cd %(dir)s; ' \
'source /root/adminrc; ' \
'python %(script)s '\
'%(cascading_domain)s %(cascading_api_ip)s '\
'%(cascaded_domain)s %(cascaded_ip)s '\
'%(gateway)s'\
% {"dir": constant.Cascaded.REMOTE_HWS_SCRIPTS_DIR,
"script":constant.Cascaded.MODIFY_CASCADED_SCRIPT_PY,
"cascading_domain": self.cascading_domain,
"cascading_api_ip": self.cascading_api_ip,
"cascaded_domain": self.cascaded_domain,
"cascaded_ip": self.cascaded_external_api_ip,
"gateway": self.external_api_gateway}
#pdb.set_trace()
for i in range(180):
try:
execute_cmd_without_stdout(
host= self.vpn_public_ip,
user=constant.VpnConstant.VPN_ROOT,
password=constant.VpnConstant.VPN_ROOT_PWD,
cmd='cd %(dir)s; python %(script)s '
'%(cascaded_tunnel_ip)s %(user)s %(passwd)s \'%(cmd)s\''
% {"dir": constant.VpnConstant.REMOTE_ROUTE_SCRIPTS_DIR,
"script": constant.VpnConstant.MODIFY_CASCADED_API_SCRIPT,
"cascaded_tunnel_ip": self.cascaded_tunnel_bearing_ip,
"user": constant.HwsConstant.ROOT,
"passwd": constant.HwsConstant.ROOT_PWD,
"cmd": modify_cascaded_api_domain_cmd})
return True
except Exception as e:
if i == 120:
                    # wait for the cascaded VM to finish rebooting
self.installer.reboot(self.cascaded_server_id, "SOFT")
LOG.error("can not connect to cascaded tunnel ip, error: %s, reboot it" % e.message)
return False
time.sleep(1)
def uninstall_cascaded(self):
self._uninstall_cascaded()
def _uninstall_cascaded(self):
if self.cascaded_server_id is None:
self.delete_cascaded_job_id = None
return
servers = [self.cascaded_server_id]
self.delete_cascaded_job_id = self.installer.delete_vm(servers, True, True)
def _install_vpn(self):
self._alloc_vpn_public_ip()
publicip = dict()
publicip["id"] = self.vpn_public_ip_id
self.vpn_external_api_ip = self._alloc_vpn_ip(self.external_api_cidr)
self.vpn_tunnel_bearing_ip = self._alloc_vpn_ip(self.tunnel_bearing_cidr)
nics = [{"subnet_id": self.external_api_id,
"ip_address": self.vpn_external_api_ip},
{"subnet_id": self.tunnel_bearing_id,
"ip_address": self.vpn_tunnel_bearing_ip}]
server_name = self.cloud_params["azname"]+"_vpn"
if self.vpn_server_id is None:
self.vpn_server_job_id = self.installer.create_vm(
self.vpn_image_id,
self.vpn_flavor, server_name,
self.vpc_id, nics,
ROOT_VOLUME_TYPE,
self.availability_zone,
public_ip_id=self.vpn_public_ip_id,
adminPass=constant.VpnConstant.VPN_ROOT_PWD,
security_groups=[self.security_group_id])
def uninstall_vpn(self):
self._uninstall_vpn()
def _uninstall_vpn(self):
if self.vpn_server_id is None:
self.delete_vpn_job_id = None
return
servers = [self.vpn_server_id]
self.delete_vpn_job_id = self.installer.delete_vm(servers, True, True)
def package_cloud_info(self):
return self.package_hws_cloud_info()
def _distribute_cloud_domain(self, region_name, azname, az_tag):
domain_list = self.cascading_domain.split(".")
domainpostfix = ".".join([domain_list[2], domain_list[3]])
l_region_name = region_name.lower()
cloud_cascaded_domain = ".".join(
[azname, l_region_name + az_tag, domainpostfix])
self.cascaded_aggregate = ".".join([azname, l_region_name + az_tag])
return cloud_cascaded_domain
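    # Illustrative sketch (not part of the original module): given a cascading domain
    # such as "cascading.hybrid.example.com", the method above keeps the last two
    # labels as the postfix and prepends the az name plus the lower-cased region and
    # tag. The values used below are hypothetical and the method is never called.
    def _demo_distribute_cloud_domain(self):
        self.cascading_domain = "cascading.hybrid.example.com"
        domain = self._distribute_cloud_domain("CN-North", "az1", "--hws")
        assert domain == "az1.cn-north--hws.example.com"
        assert self.cascaded_aggregate == "az1.cn-north--hws"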
def package_hws_cloud_info(self):
cascaded_vpn_info = {
"public_ip": self.vpn_public_ip,
"external_api_ip": self.vpn_external_api_ip,
"tunnel_bearing_ip": self.vpn_tunnel_bearing_ip
}
cascaded_info = {
"public_ip": self.cascaded_public_ip,
"external_api_ip": self.cascaded_external_api_ip,
"tunnel_bearing_ip": self.cascaded_tunnel_bearing_ip,
"internal_base_ip": self.cascaded_internal_base_ip,
"domain": self._distribute_cloud_domain(
self.cloud_params["project_info"]['region'], self.cloud_params['azname'], "--hws"),
"aggregate": self.cascaded_aggregate
}
cascaded_subnets_info = {
"vpc_id": self.vpc_id,
"security_group_id": self.security_group_id,
"tunnel_bearing_id": self.tunnel_bearing_id,
"internal_base_id": self.internal_base_id,
"external_api": self.external_api_cidr,
"external_api_gateway_ip": self.external_api_gateway,
"tunnel_bearing": self.tunnel_bearing_cidr,
"internal_base": self.internal_base_cidr,
"debug": self.debug_cidr
}
cascading_info = {
"external_api_ip": self.cascading_api_ip,
"domain": self.cascading_domain
}
cascading_vpn_info = {
"public_ip": self.cascading_vpn_public_gw,
"external_api_ip": self.cascading_vpn_api_ip,
"tunnel_bearing_ip": self.cascading_vpn_tunnel_ip
}
cascading_subnets_info = {
"external_api": self.cascading_api_subnet,
"tunnel_bearing": self.cascading_tunnel_subnet
}
vpn_conn_name = {
"api_conn_name": self.cloud_id + '-api',
"tunnel_conn_name": self.cloud_id + '-tunnel'
}
#pdb.set_trace()
info = {"cloud_id": self.cloud_id,
"access": self.cloud_params["access"],
"cascaded_vpn_info":cascaded_vpn_info,
"cascading_vpn_info":cascading_vpn_info,
"cascaded_info": cascaded_info,
"cascading_info":cascading_info,
"cascaded_subnets_info": cascaded_subnets_info,
"cascading_subnets_info": cascading_subnets_info,
"vpn_conn_name": vpn_conn_name,
"proxy_info": self.proxy_info
}
self.cloud_info_handler.write_cloud_info(info)
return info
def _read_install_info(self):
self.vpc_id = None
self.internal_base_id = None
self.debug_id = None
self.external_api_id = None
self.tunnel_bearing_id = None
self.vpn_server_id = None
self.cascaded_server_id = None
self.cascaded_public_ip = None
self.cascaded_public_ip_id = None
self.vpn_public_ip_id = None
self.tunnel_bearing_nic_id = None
self.external_api_nic_id = None
self.internal_base_nic_id = None
self.port_id_bind_public_ip = None
self.security_group_id = None
self.proxy_info = None
self.vpc_cidr = None
self.external_api_cidr = None
self.tunnel_bearing_cidr = None
self.internal_base_cidr = None
self.debug_cidr = None
cloud_info = self.install_data_handler.read_cloud_info()
if cloud_info is None:
return
if "proxy_info" in cloud_info.keys():
self.proxy_info = cloud_info["proxy_info"]
if "subnets_cidr" in cloud_info.keys():
subnet_cidr = cloud_info["subnets_cidr"]
self.vpc_cidr = subnet_cidr["vpc_cidr"]
self.external_api_cidr = subnet_cidr["external_api_cidr"]
self.tunnel_bearing_cidr = subnet_cidr["tunnel_bearing_cidr"]
self.internal_base_cidr = subnet_cidr["internal_base_cidr"]
self.debug_cidr = subnet_cidr["debug_cidr"]
if "vpc" in cloud_info.keys():
self.vpc_id = cloud_info["vpc"]["id"]
self.security_group_id = cloud_info["vpc"]["security_group_id"]
if "subnets" in cloud_info.keys():
subnets = cloud_info["subnets"]
self.internal_base_id = subnets["internal_base"]["id"]
self.debug_id = subnets["debug"]["id"]
self.external_api_id = subnets["external_api"]["id"]
self.tunnel_bearing_id = subnets["tunnel_bearing"]["id"]
if "vpn" in cloud_info.keys():
self.vpn_server_id = cloud_info["vpn"]["server_id"]
if "cascaded" in cloud_info.keys():
self.cascaded_server_id = cloud_info["cascaded"]["server_id"]
self.tunnel_bearing_nic_id = cloud_info["cascaded"]["tunnel_bearing_nic_id"]
self.external_api_nic_id = cloud_info["cascaded"]["external_api_nic_id"]
self.internal_base_nic_id = cloud_info["cascaded"]["internal_base_nic_id"]
self.port_id_bind_public_ip = cloud_info["cascaded"]["port_id_bind_public_ip"]
if "public_ip" in cloud_info.keys():
self.vpn_public_ip = cloud_info["public_ip"]["vpn_public_ip"]
self.vpn_public_ip_id = cloud_info["public_ip"]["vpn_public_ip_id"]
self.cascaded_public_ip = cloud_info["public_ip"]["cascaded_public_ip"]
self.cascaded_public_ip_id = cloud_info["public_ip"]["cascaded_public_ip_id"]
def _read_cloud_info(self):
cloud_info = self.cloud_info_handler.read_cloud_info()
return cloud_info
def get_cloud_info(self):
self._read_install_info()
return self._read_cloud_info()
| {
"content_hash": "98128a51974b3f1388d837c52fec53b9",
"timestamp": "",
"source": "github",
"line_count": 631,
"max_line_length": 128,
"avg_line_length": 44.378763866877975,
"alnum_prop": 0.5765096596793201,
"repo_name": "Hybrid-Cloud/cloud_manager",
"id": "1f139cabfac3264decf363795b9dff4d74660568",
"size": "28004",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "code/cloudmanager/install/hws/hws_install.py",
"mode": "33188",
"license": "apache-2.0",
"language": [],
"symlink_target": ""
} |
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('taxbrain', '0013_auto_20141021_1346'),
]
operations = [
migrations.CreateModel(
name='AdjustmentsInputs',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
],
options={
},
bases=(models.Model,),
),
migrations.CreateModel(
name='AlternativeMinimumInputs',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
],
options={
},
bases=(models.Model,),
),
migrations.CreateModel(
name='CreditsInputs',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
],
options={
},
bases=(models.Model,),
),
migrations.CreateModel(
name='GrossIncomeInputs',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
],
options={
},
bases=(models.Model,),
),
migrations.CreateModel(
name='IncomeRatesInputs',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
],
options={
},
bases=(models.Model,),
),
migrations.CreateModel(
name='ItemizedDeductionsInputs',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
],
options={
},
bases=(models.Model,),
),
migrations.CreateModel(
name='SocialSecurityInputs',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
],
options={
},
bases=(models.Model,),
),
migrations.CreateModel(
name='StandardDeductionsInputs',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
],
options={
},
bases=(models.Model,),
),
migrations.RenameModel(
old_name='TaxSaveInputs',
new_name='PersonalExemptionsInputs',
),
]
| {
"content_hash": "98e674c86c68267d9d4525a6f3f89215",
"timestamp": "",
"source": "github",
"line_count": 89,
"max_line_length": 114,
"avg_line_length": 31.55056179775281,
"alnum_prop": 0.49572649572649574,
"repo_name": "PeterDSteinberg/webapp-public",
"id": "6aef41e12b337d32626c66327d77e1b5ddd333b9",
"size": "2832",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "webapp/apps/taxbrain/migrations/0014_auto_20141021_1901.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "856744"
},
{
"name": "HTML",
"bytes": "61933"
},
{
"name": "JavaScript",
"bytes": "85905"
},
{
"name": "Python",
"bytes": "381167"
},
{
"name": "Shell",
"bytes": "17"
}
],
"symlink_target": ""
} |
"""List of checks for crosswind sweeps."""
from makani.analysis.checks.collection import score_checks
from makani.analysis.crosswind_batch_sims import batch_sim_params
class CrosswindSweepChecks(score_checks.ScoreChecks):
"""The list of checks for crosswind sweep."""
def __init__(self, for_log, wing_model):
sweep_parameters = batch_sim_params.CrosswindSweepsParameters(
only_steady_flight=True, steady_flight_mode_time=0.0,
wing_model=wing_model)
super(CrosswindSweepChecks, self).__init__(
for_log, sweep_parameters.scoring_functions, ['CrosswindNormal'])
| {
"content_hash": "d1ef3a45db8fe69b29db5c319440aa27",
"timestamp": "",
"source": "github",
"line_count": 15,
"max_line_length": 73,
"avg_line_length": 39.93333333333333,
"alnum_prop": 0.7362270450751253,
"repo_name": "google/makani",
"id": "d340eb4168494a1eec119e014d938f7ac9fe6751",
"size": "1188",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "analysis/crosswind_batch_sims/crosswind_sweep_checks.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Assembly",
"bytes": "119408"
},
{
"name": "C",
"bytes": "20174258"
},
{
"name": "C++",
"bytes": "30512322"
},
{
"name": "CSS",
"bytes": "8921"
},
{
"name": "Dockerfile",
"bytes": "1381"
},
{
"name": "Emacs Lisp",
"bytes": "1134"
},
{
"name": "HTML",
"bytes": "65745"
},
{
"name": "Java",
"bytes": "1558475"
},
{
"name": "JavaScript",
"bytes": "130727"
},
{
"name": "Jupyter Notebook",
"bytes": "1154728"
},
{
"name": "MATLAB",
"bytes": "1026162"
},
{
"name": "Makefile",
"bytes": "2798"
},
{
"name": "Objective-C",
"bytes": "62972"
},
{
"name": "Perl",
"bytes": "870724"
},
{
"name": "Python",
"bytes": "5552781"
},
{
"name": "RPC",
"bytes": "195736"
},
{
"name": "Roff",
"bytes": "2567875"
},
{
"name": "SWIG",
"bytes": "8663"
},
{
"name": "Shell",
"bytes": "297941"
},
{
"name": "Starlark",
"bytes": "462998"
},
{
"name": "Vim Script",
"bytes": "2281"
},
{
"name": "XC",
"bytes": "50398"
},
{
"name": "XS",
"bytes": "49289"
}
],
"symlink_target": ""
} |
"""
Sales integration with Events module
"""
from maker.sales.models import Opportunity
from maker.core.models import Object
from maker.events.rendering import EventRenderer
from django.db.models import Q
import datetime, time
def get_events(request):
"Return a list of EventRenderers from available Sales"
events = []
query = Q(expected_date__isnull=False)
sales = Object.filter_by_request(request, manager=Opportunity.objects.filter(query))
for sale in sales:
# event = EventRenderer(sale.contact.name, None, sale.expected_date, sale.get_absolute_url())
event = EventRenderer(sale.contact.name, None, datetime.datetime.fromtimestamp(time.mktime(time.strptime(str(sale.expected_date), "%Y-%m-%d"))), sale.get_absolute_url()) #bad code
event.css_class += " projects-calendar-task"
events.append(event)
return events
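# Illustrative sketch (not part of the original module): the strptime/mktime round-trip
# flagged as "#bad code" above can be expressed directly with datetime.combine. Shown
# as a standalone helper under that assumption; it is not wired into get_events().
def expected_date_to_datetime(expected_date):
    "Convert a datetime.date into a naive datetime at midnight of that day"
    return datetime.datetime.combine(expected_date, datetime.time.min)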
| {
"content_hash": "8c2056a669f46e308908ae6eef5848f8",
"timestamp": "",
"source": "github",
"line_count": 24,
"max_line_length": 187,
"avg_line_length": 36.541666666666664,
"alnum_prop": 0.7251995438996579,
"repo_name": "alejo8591/maker",
"id": "e9347439e0b07098119db81519fc9d7b21341583",
"size": "929",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "sales/events.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "JavaScript",
"bytes": "1578070"
},
{
"name": "Perl",
"bytes": "164"
},
{
"name": "Python",
"bytes": "2863599"
},
{
"name": "Shell",
"bytes": "3561"
}
],
"symlink_target": ""
} |
import os
from django.core.management import BaseCommand
from django.contrib.gis.utils import LayerMapping
from mfl_gis.models import WorldBorder
WORLD_SHAPEFILE = os.path.join(
os.path.dirname(
os.path.dirname(
os.path.dirname(__file__) # Folder with this file i.e 'commands'
) # Parent of folder where this file is i.e 'management'
), # The application folder itself i.e mfl_gis
'data/world/TM_WORLD_BORDERS-0.3.shp'
)
WORLD_SHAPEFILE_MODEL_MAPPING = {
'name': 'NAME',
'code': 'ISO3', # We could have used the ISO2 code also
'mpoly': 'MULTIPOLYGON',
'longitude': 'LON',
'latitude': 'LAT'
}
class Command(BaseCommand):
def handle(self, *args, **options):
wb_count = WorldBorder.objects.count()
if wb_count:
self.stdout.write(
'{} countries already exist'.format(wb_count))
return
lm = LayerMapping(
WorldBorder,
WORLD_SHAPEFILE,
WORLD_SHAPEFILE_MODEL_MAPPING,
transform=False,
encoding='iso-8859-1'
)
lm.save(strict=True, verbose=False)
self.stdout.write("Loaded world borders")
| {
"content_hash": "c2a11b230618489e715e066d44be95c8",
"timestamp": "",
"source": "github",
"line_count": 42,
"max_line_length": 77,
"avg_line_length": 28.642857142857142,
"alnum_prop": 0.6084788029925187,
"repo_name": "MasterFacilityList/mfl_api",
"id": "c323656c0500c5571471c19df4af4aea00318d98",
"size": "1203",
"binary": false,
"copies": "3",
"ref": "refs/heads/develop",
"path": "mfl_gis/management/commands/load_world_boundaries.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "34"
},
{
"name": "HTML",
"bytes": "54029"
},
{
"name": "JavaScript",
"bytes": "1285"
},
{
"name": "PLpgSQL",
"bytes": "8030"
},
{
"name": "Python",
"bytes": "1111072"
},
{
"name": "Ruby",
"bytes": "1251"
},
{
"name": "Shell",
"bytes": "1455"
}
],
"symlink_target": ""
} |
from django.conf.urls import url, patterns
from django.utils.translation import ugettext
from django.views.generic import RedirectView
from models import Vote, Bill
from views import (
VoteListView, VoteCsvView, VoteDetailView, VoteTagsView,
BillListView, BillCsvView, BillDetailView, BillTagsView,
bill_unbind_vote, bill_auto_complete,
bill_tags_cloud, embed_bill_details,
vote_tags_cloud, vote_auto_complete,
knesset_proposal_auto_complete)
import feeds
vote_view = VoteListView(queryset=Vote.objects.all(),
paginate_by=20,
extra_context={'votes': True,
'title': ugettext('Votes')})
bill_list_view = BillListView(
queryset=Bill.objects.all().filter(
law__merged_into=None).order_by('-stage_date'),
paginate_by=20, extra_context={'title': ugettext('Bills')})
bill_detail_view = BillDetailView.as_view()
vote_list_view = VoteListView(queryset=Vote.objects.all(),
paginate_by=20,
extra_context={'votes': True,
'title': ugettext('Votes')})
vote_detail_view = VoteDetailView.as_view()
lawsurlpatterns = patterns(
'',
url(r'^bill/$', bill_list_view, name='bill-list'),
url(r'^bill/tag/$', bill_tags_cloud, name='bill-tags-cloud'),
url(r'^bill/rss/$', feeds.Bills(), name='bills-feed'),
url(r'^bill/csv/$', BillCsvView.as_view()),
url(r'^bill/tag/(?P<tag>.*)/$', BillTagsView.as_view(), name='bill-tag'),
url(r'^bill/knesset-booklet/(?P<booklet_num>\d+)/$', RedirectView.as_view(
url='/bill/?booklet=%(booklet_num)s', permanent=True)),
url(r'^bill/(?P<pk>\d+)/$', bill_detail_view, name='bill-detail'),
url(r'^bill/(?P<object_id>\d+)/embed/$',
embed_bill_details, name='embed-bill-details'),
url(r'^bill/(?P<object_id>\d+)/unbind-vote/(?P<vote_id>\d+)/$',
bill_unbind_vote, name='bill-unbind-vote'),
url(r'^bill/auto_complete/$',
bill_auto_complete, name='bill-auto-complete'),
url(r'^bill/auto_complete_knesset_proposal/$',
knesset_proposal_auto_complete, name='bill-auto-complete'),
url(r'^bill/(?P<slug>[\w\-\"]+)/(?P<pk>\d+)/$',
bill_detail_view, name='bill-detail-with-slug'),
url(r'^vote/$', vote_list_view, name='vote-list'),
url(r'^vote/csv/$', VoteCsvView.as_view()),
url(r'^vote/tag/$', vote_tags_cloud, name='vote-tags-cloud'),
url(r'^vote/rss/$', feeds.Votes(), name='votes-feed'),
url(r'^vote/tag/(?P<tag>.*)/$', VoteTagsView.as_view(), name='vote-tag'),
url(r'^vote/(?P<pk>\d+)/$', vote_detail_view, name='vote-detail'),
url(r'^vote/(?P<object_id>\d+)/$', vote_view, name='vote-detail'),
    # TODO: the next URL is hardcoded in a JS file
url(r'^vote/auto_complete/$',
vote_auto_complete, name='vote-auto-complete'),
)
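# Illustrative sketch (not part of the original module): the named patterns above can
# be resolved with Django's reverse(); the primary key and tag values used here are
# hypothetical and the helper exists for illustration only.
def demo_reverse_laws_urls():
    from django.core.urlresolvers import reverse
    return [
        reverse('bill-detail', kwargs={'pk': 1}),
        reverse('vote-detail', kwargs={'pk': 1}),
        reverse('bill-tag', kwargs={'tag': 'welfare'}),
    ]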
| {
"content_hash": "eac46c824b23f1d32384c641aab40e22",
"timestamp": "",
"source": "github",
"line_count": 59,
"max_line_length": 78,
"avg_line_length": 49.220338983050844,
"alnum_prop": 0.6070936639118457,
"repo_name": "navotsil/Open-Knesset",
"id": "f1198608fd20936545d856636af5cf3e796ab274",
"size": "2921",
"binary": false,
"copies": "9",
"ref": "refs/heads/master",
"path": "laws/urls.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "346228"
},
{
"name": "HTML",
"bytes": "690043"
},
{
"name": "JavaScript",
"bytes": "214741"
},
{
"name": "Python",
"bytes": "4051776"
},
{
"name": "Shell",
"bytes": "203"
}
],
"symlink_target": ""
} |
import datetime
import pagerange
import re
from django import forms
from django.db import transaction
from django.forms.models import BaseModelFormSet, modelformset_factory
from django.utils.encoding import force_text
from django.utils.html import format_html
from django.utils.safestring import mark_safe
from django.core.validators import RegexValidator
from django.core.exceptions import ValidationError
from crispy_forms.helper import FormHelper
from . import models
from .widgets import IntegerRangeField
# Helper code -----------------------------------------------------------------
def get_object_or_none(klass, *args, **kwargs):
if hasattr(klass, '_default_manager'):
queryset = klass._default_manager.all()
else:
queryset = klass
try:
return queryset.get(*args, **kwargs)
except AttributeError:
klass__name = (
klass.__name__
if isinstance(klass, type)
else klass.__class__.__name__)
raise ValueError(
"First argument to get_object_or_404() must be a Model, Manager, "
"or QuerySet, not '%s'." % klass__name
)
except queryset.model.DoesNotExist:
return None
# ListModelChoiceField --------------------------------------------------------
class ListModelChoiceField(forms.ChoiceField):
"""
Special field using list instead of queryset as choices
"""
def __init__(self, model, *args, **kwargs):
self.model = model
super().__init__(*args, **kwargs)
def to_python(self, value):
if value in self.empty_values:
return None
try:
value = self.model.objects.get(uuid=value)
except self.model.DoesNotExist:
raise ValidationError(self.error_messages['invalid_choice'], code='invalid_choice')
return value
def valid_value(self, value):
"""Check to see if the provided value is a valid choice."""
if any(value.uuid == choice[0] for choice in self.choices):
return True
else:
return False
# Form for importing BarcodeSet from JSON -------------------------------------
class BarcodeSetImportForm(forms.Form):
"""The form used for uploading serialized barcode sets from JSON"""
#: File upload field
json_file = forms.FileField()
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.helper = FormHelper()
self.helper.template_pack = 'bootstrap4'
self.helper.form_tag = False
# BarcodeSetEntry multi-edit related ------------------------------------------
#: Number of additional barcode set entry forms (= table rows) to create
EXTRA_BARCODE_FORMS = 10
#: Fields to use for the barcode set forms (= table rows)
BARCODE_SET_ENTRY_FIELDS = ('name', 'sequence')
class BarcodeSetEntryForm(forms.ModelForm):
"""Form for handling barcode entries (table rows in the form set)"""
class Meta:
model = models.BarcodeSetEntry
fields = BARCODE_SET_ENTRY_FIELDS
class BaseBarcodeSetEntryFormSet(BaseModelFormSet):
"""Base class for the form set to create"""
def __init__(self, *args, **kwargs):
self.barcode_set = kwargs.pop('barcode_set')
super().__init__(*args, **kwargs)
self.queryset = self.barcode_set.entries.order_by('name').all()
def save(self, *args, **kwargs):
"""Handle saving of form set, including support for deleting barcode
set entries
"""
with transaction.atomic():
entries = super().save(*args, commit=False, **kwargs)
for entry in entries:
entry.barcode_set = self.barcode_set
entry.save()
for entry in self.deleted_objects:
entry.delete()
return entries
#: Form set for barcodes, constructed with factory function
BarcodeSetEntryFormSet = modelformset_factory(
models.BarcodeSetEntry,
can_delete=True,
form=BarcodeSetEntryForm,
formset=BaseBarcodeSetEntryFormSet,
fields=BARCODE_SET_ENTRY_FIELDS,
extra=EXTRA_BARCODE_FORMS)
# Form for importing FlowCell from JSON ---------------------------------------
class FlowCellImportForm(forms.Form):
"""The form used for uploading serialized flow cells from JSON"""
#: File upload field
json_file = forms.FileField()
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.helper = FormHelper()
self.helper.template_pack = 'bootstrap4'
self.helper.form_tag = False
# FlowCell related ------------------------------------------------------------
#: Regular expression for flow cell names
FLOW_CELL_NAME_RE = (
r'^(?P<date>\d{6,6})'
r'_(?P<machine_name>[^_]+)'
r'_(?P<run_no>\d+)'
r'_(?P<slot>\w)'
r'_(?P<vendor_id>[^_]+)'
r'(_(?P<label>.+))?$')
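# Illustrative sketch (not part of the original module): the tokens FLOW_CELL_NAME_RE
# captures for the example name shown in the form's help text below. Standalone helper
# for illustration only.
def demo_flow_cell_name_tokens():
    m = re.match(FLOW_CELL_NAME_RE, '160303_ST-K12345_0815_A_BCDEFGHIXX_LABEL')
    # -> {'date': '160303', 'machine_name': 'ST-K12345', 'run_no': '0815',
    #     'slot': 'A', 'vendor_id': 'BCDEFGHIXX', 'label': 'LABEL'}
    return m.groupdict()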
class FlowCellForm(forms.ModelForm):
"""Custom form for manipulating FlowCell objects
We need a special form to tokenize/untokenize the flow cell name to/from
properties.
"""
#: Special field with the flow cell name. The different tokens
#: will be extracted in the form's logic
name = forms.CharField(
max_length=100,
validators=[
RegexValidator(
FLOW_CELL_NAME_RE,
message=('Invalid flow cell name. Did you forgot the '
'underscore between the slot and the vendor ID?'))],
help_text=('The full flow cell name, e.g., '
'160303_ST-K12345_0815_A_BCDEFGHIXX_LABEL'))
class Meta:
model = models.FlowCell
fields = ('name', 'description', 'num_lanes', 'operator', 'demux_operator',
'rta_version', 'status_sequencing', 'status_conversion', 'status_delivery',
'delivery_type')
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
if self.instance:
self.fields['name'].initial = self.instance.get_full_name()
def clean(self):
if 'name' not in self.cleaned_data:
return self.cleaned_data # give up, wrong format
name_dict = re.match(
FLOW_CELL_NAME_RE, self.cleaned_data.pop('name')).groupdict()
self.cleaned_data['run_date'] = datetime.datetime.strptime(
name_dict['date'], '%y%m%d').date()
self.cleaned_data['sequencing_machine'] = get_object_or_none(
models.SequencingMachine, vendor_id=name_dict['machine_name'])
if self.cleaned_data['sequencing_machine'] is None:
self.add_error('name', 'Unknown sequencing machine')
self.cleaned_data['run_number'] = int(name_dict['run_no'])
self.cleaned_data['slot'] = name_dict['slot']
self.cleaned_data['vendor_id'] = name_dict['vendor_id']
self.cleaned_data['label'] = name_dict['label']
return self.cleaned_data
def save(self, *args, **kwargs):
for key in ('run_date', 'sequencing_machine', 'run_number', 'slot',
'vendor_id', 'label'):
setattr(self.instance, key, self.cleaned_data[key])
return super().save(*args, **kwargs)
class LibrariesPrefillForm(forms.Form):
"""Helper form for filling out forms with barcodes"""
#: Choice field for selecting first barcode
barcode1 = forms.ModelChoiceField(
to_field_name='uuid',
required=False,
queryset=models.BarcodeSet.objects.order_by('name').all())
#: Choice field for selecting second barcode
barcode2 = forms.ModelChoiceField(
to_field_name='uuid',
required=False,
queryset=models.BarcodeSet.objects.order_by('name').all())
class FlowCellUpdateStatusForm(forms.Form):
"""Helper form for updating the status."""
attribute = forms.ChoiceField(
required=True,
choices=tuple(zip(
('sequencing', 'conversion', 'delivery'),
('sequencing', 'conversion', 'delivery'))))
status = forms.ChoiceField(
required=True,
choices=tuple(zip(
('initial', 'complete', 'failed', 'closed', 'canceled'),
('initial', 'complete', 'failed', 'closed', 'canceled'))))
# Library multi-edit related -------------------------------------------------
#: Number of additional barcode set entry forms (= table rows) to create
EXTRA_LIBRARY_FORMS = 10
#: Fields to use for the library forms (= table rows)
LIBRARY_FIELDS = ('name', 'reference', 'barcode_set', 'barcode',
'barcode_set2', 'barcode2', 'lane_numbers')
#: Regex for library name
LIBRARY_NAME_REGEX = r'^[a-zA-Z0-9_\-]+$'
class BarcodeSelect(forms.Select):
"""Barcode selection, adds "data-barcode-set" attribute to <option>
This is required for the form JavaScript to limit selections to the
barcodes from the given barcode sets.
"""
def render_options(self, selected_choices):
# Normalize to strings.
selected_choices = set(force_text(v) for v in selected_choices)
output = []
for option_value, option_label, option_model in self.choices:
if isinstance(option_label, (list, tuple)):
output.append(format_html(
'<optgroup label="{}">', force_text(option_value)))
for option in option_label:
output.append(self.render_option(
selected_choices, option, option_model))
output.append('</optgroup>')
else:
output.append(self.render_option(
selected_choices, option_value, option_label,
option_model))
return '\n'.join(output)
def render_option(self, selected_choices, option_value, option_label,
option_model):
if option_value is None:
option_value = ''
option_value = force_text(option_value)
if option_value in selected_choices:
selected_html = mark_safe(' selected="selected"')
if not self.allow_multiple_selected:
# Only allow for a single selection.
selected_choices.remove(option_value)
else:
selected_html = ''
if option_model and hasattr(option_model, 'barcode_set_id'):
set_id = option_model.barcode_set.uuid
else:
set_id = ''
return format_html('<option data-set-id="{}" value="{}"{}>{}</option>',
set_id,
option_value,
selected_html,
force_text(option_label))
class LibraryForm(forms.ModelForm):
"""Form for handling library entries (table rows in the form set)"""
name = forms.CharField(
required=False,
max_length=100,
validators=[
RegexValidator(
LIBRARY_NAME_REGEX,
message=('Invalid library name. Alphanumerics, underscores '
'and dashes are allowed.'))])
    # The choices for the barcode fields below are constructed in the form set to
    # reduce the number of database queries.
barcode_set = ListModelChoiceField(
model=models.BarcodeSet,
required=False)
barcode = ListModelChoiceField(
model=models.BarcodeSetEntry,
required=False,
widget=BarcodeSelect)
barcode_set2 = ListModelChoiceField(
model=models.BarcodeSet,
required=False)
barcode2 = ListModelChoiceField(
model=models.BarcodeSetEntry,
required=False,
widget=BarcodeSelect)
lane_numbers = IntegerRangeField(required=True, min_length=1)
def __init__(self, *args, **kwargs):
# Pre-set the lane numbers, required for Django Formsets to work
# with uninitialized values
kwargs.setdefault('initial', {})
if 'instance' in kwargs:
kwargs['initial']['lane_numbers'] = kwargs['instance'].lane_numbers
if kwargs['instance'].barcode_set:
kwargs['initial']['barcode_set'] = kwargs['instance'].barcode_set.uuid
if kwargs['instance'].barcode:
kwargs['initial']['barcode'] = kwargs['instance'].barcode.uuid
if kwargs['instance'].barcode_set2:
kwargs['initial']['barcode_set2'] = kwargs['instance'].barcode_set2.uuid
if kwargs['instance'].barcode2:
kwargs['initial']['barcode2'] = kwargs['instance'].barcode2.uuid
else:
kwargs['initial']['lane_numbers'] = []
super().__init__(*args, **kwargs)
class Meta:
model = models.Library
fields = LIBRARY_FIELDS
class BaseLibraryFormSet(BaseModelFormSet):
"""Base class for the form set to create"""
def __init__(self, *args, **kwargs):
self.flow_cell = kwargs.pop('flow_cell')
super().__init__(*args, **kwargs)
self.queryset = self.flow_cell.libraries.all()
self._fixup_forms()
def _fixup_forms(self):
"""Prevent too many DB queries."""
barcode_set_choices = [('', '----')] + [
(m.uuid, m.name) for m in models.BarcodeSet.objects.all()]
barcode_choices = [('', '----', None)] + [
(m.uuid, m.name, m)
for m in models.BarcodeSetEntry.objects.select_related('barcode_set').all()]
for form in self:
form.fields['barcode_set'].choices = barcode_set_choices
form.fields['barcode'].choices = barcode_choices
form.fields['barcode_set2'].choices = barcode_set_choices
form.fields['barcode2'].choices = barcode_choices
def save(self, *args, **kwargs):
"""Handle saving of form set, including support for deleting barcode
set entries
"""
with transaction.atomic():
entries = super().save(*args, commit=False, **kwargs)
for entry in entries:
entry.flow_cell = self.flow_cell
entry.save()
for entry in self.deleted_objects:
entry.delete()
return entries
#: Form set for barcodes, constructed with factory function
LibraryFormSet = modelformset_factory(
models.Library,
can_delete=True,
form=LibraryForm,
formset=BaseLibraryFormSet,
fields=LIBRARY_FIELDS,
extra=EXTRA_LIBRARY_FORMS)
# Wizard for XLS copy-and-paste ----------------------------------------------
class PasteTSVForm(forms.Form):
"""First step of the XLS copy-and-paste wizard
    Allows copying and pasting TSV data or uploading an XLS file
"""
#: A Textarea for copy and paste
payload = forms.CharField(
required=False,
label='Tab-separated values',
help_text='Copy-and paste fields from Excel here',
widget=forms.Textarea)
def clean(self):
payload = self.cleaned_data.get('payload')
if not payload:
self.add_error('payload', 'No data found')
return self.cleaned_data
for l in payload.splitlines():
if len(l.split('\t')) < 3:
self.add_error(
'payload',
'Expected at least 3 tab-separated columns, '
'please check your data')
break
return self.cleaned_data
class PickColumnsForm(forms.Form):
"""Second step in the XLS copy-and-paste wizard
Allows to select the barcode sets and barcode set entries as well as
columns for the sets and the row to start at.
"""
#: Reference to use for all samples
reference = forms.ChoiceField(
required=True,
choices=models.REFERENCE_CHOICES,
label='Reference/Organism',
help_text=('Upon import, the same for all samples. Can be changed '
'later on'))
#: Select column for sample name
sample_column = forms.IntegerField(
min_value=1,
required=True,
label='Sample column index',
help_text='The first column has index 1')
#: Barcode set for barcode 1
barcode_set = forms.ModelChoiceField(
to_field_name='uuid',
required=False,
label='Barcode set 1',
queryset=models.BarcodeSet.objects.order_by('name'))
#: Select column for barcode 1
barcode_column = forms.IntegerField(
min_value=1,
required=False,
label='Barcode 1 column index',
help_text='Leave empty for no barcodes. The first column has index 1')
#: Barcode set for barcode 2
barcode_set2 = forms.ModelChoiceField(
to_field_name='uuid',
required=False,
label='Barcode set 2',
queryset=models.BarcodeSet.objects.order_by('name'),
help_text='Leave empty for no secondary barcodes')
#: Select column for barcode 2
barcode2_column = forms.IntegerField(
min_value=1,
required=False,
label='Barcode 2 column index',
help_text=('Leave empty for no secondary barcodes. The first column '
'has index 1'))
#: Select row number to start at
first_row = forms.IntegerField(
min_value=1,
required=False,
label='First data row',
help_text=('Select number of first row with data. The first row '
'has index 1'))
#: Select column for lanes
lane_numbers_column = forms.IntegerField(
min_value=1,
required=True,
label='Lane column index',
help_text='The first column has index 1')
def __init__(self, table_rows=None, table_ncols=None, *args, **kwargs):
super(PickColumnsForm, self).__init__(*args, **kwargs)
self.table_rows = table_rows
self.table_ncols = table_ncols
def clean(self):
if not self.cleaned_data.get('barcode_set'):
self.add_error('barcode_set', 'Please select a barcode set')
# Validate column range
for f in [
                'sample_column', 'barcode_column', 'barcode2_column',
'lane_numbers_column']:
col_field = self.cleaned_data.get(f)
if col_field and col_field > len(self.table_ncols):
self.add_error(f, 'Column out of data range')
# Validate row range
first_row = self.cleaned_data.get('first_row')
if first_row and first_row > len(self.table_rows):
self.add_error('first_row', 'Row out of data range')
# Validate sample column
sample_col = self.cleaned_data.get('sample_column')
if sample_col:
for row in self.table_rows:
if not re.match(LIBRARY_NAME_REGEX, row[sample_col - 1]):
self.add_error(
'sample_column',
'Sample names may only contain alphanumerics, '
'underscores and dashes. Did you select the correct '
'column?')
break
# Validate lane column
lane_col = self.cleaned_data.get('lane_numbers_column')
if lane_col:
for row in self.table_rows:
try:
pagerange.PageRange(row[lane_col - 1])
except ValueError:
self.add_error(
'lane_numbers_column',
'Invalid page range(s) found. Did you select the '
'correct column?')
break
class ConfirmExtractionForm(forms.Form):
"""Empty form, used for confirming that the detection worked correctly
"""
| {
"content_hash": "1c2f497c7feefe252e0536a615453f22",
"timestamp": "",
"source": "github",
"line_count": 555,
"max_line_length": 95,
"avg_line_length": 35.33873873873874,
"alnum_prop": 0.5849181665222046,
"repo_name": "bihealth/flowcelltool",
"id": "5e5cadd5445ca3a5387c18408e391c4acd654292",
"size": "19638",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "flowcelltool/flowcells/forms.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "165106"
},
{
"name": "HTML",
"bytes": "110088"
},
{
"name": "JavaScript",
"bytes": "30968"
},
{
"name": "Python",
"bytes": "420211"
},
{
"name": "Shell",
"bytes": "5028"
}
],
"symlink_target": ""
} |
"""Django settings for use within the docker container."""
from os import environ
import dj_database_url
from .production import *
# Disable debug mode
DEBUG = False
SECRET_KEY = environ.get('SECRET_KEY') or 'please-change-me'
PROJECT_ROOT = (
environ.get('PROJECT_ROOT') or dirname(dirname(abspath(__file__))))
SERVICE_DIRECTORY_API_BASE_URL = environ.get(
'SERVICE_DIRECTORY_API_BASE_URL', '')
SERVICE_DIRECTORY_API_USERNAME = environ.get(
'SERVICE_DIRECTORY_API_USERNAME', '')
SERVICE_DIRECTORY_API_PASSWORD = environ.get(
'SERVICE_DIRECTORY_API_PASSWORD', '')
GOOGLE_PLACES_API_SERVER_KEY = environ.get(
'GOOGLE_PLACES_API_SERVER_KEY', '')
RAVEN_DSN = environ.get('RAVEN_DSN')
RAVEN_CONFIG = {'dsn': RAVEN_DSN} if RAVEN_DSN else {}
CAS_SERVER_URL = environ.get('CAS_SERVER_URL') or ''
COMPRESS_OFFLINE = True
DATABASES = {
'default': dj_database_url.config(
default='sqlite:///%s' % (join(PROJECT_ROOT, 'gemmolo.db'),))}
MEDIA_ROOT = join(PROJECT_ROOT, 'media')
STATIC_ROOT = join(PROJECT_ROOT, 'static')
LOCALE_PATHS = (
join(PROJECT_ROOT, "locale"),
)
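# Illustrative sketch (not part of the original module): dj_database_url.config()
# above reads the DATABASE_URL environment variable and falls back to the SQLite
# default; an explicit URL can be parsed the same way. The credentials below are
# hypothetical and the helper is for illustration only.
def demo_parse_database_url():
    return dj_database_url.parse('postgres://gem:secret@db:5432/gem')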
| {
"content_hash": "f654d7fb178284e4499c2f726daa9d0e",
"timestamp": "",
"source": "github",
"line_count": 45,
"max_line_length": 71,
"avg_line_length": 24.666666666666668,
"alnum_prop": 0.6855855855855856,
"repo_name": "Mitso/springstertestapp",
"id": "4db62d7c1c1c365b967f5e2692deca22a8ff57f7",
"size": "1110",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "gem/settings/docker.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "CSS",
"bytes": "128710"
},
{
"name": "HTML",
"bytes": "138391"
},
{
"name": "JavaScript",
"bytes": "9716"
},
{
"name": "Python",
"bytes": "183268"
},
{
"name": "Shell",
"bytes": "563"
}
],
"symlink_target": ""
} |
from swgpy.object import *
def create(kernel):
result = Tangible()
result.template = "object/tangible/wearables/robe/shared_robe_s05.iff"
result.attribute_template_id = 11
result.stfName("wearables_name","robe_s05")
#### BEGIN MODIFICATIONS ####
#### END MODIFICATIONS ####
	return result
| {
"content_hash": "a518acea63c3537316783a41ffb45d18",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 71,
"avg_line_length": 23.46153846153846,
"alnum_prop": 0.6983606557377049,
"repo_name": "obi-two/Rebelion",
"id": "a81182fdafe355406368fa964b27a89cae9f2da6",
"size": "450",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "data/scripts/templates/object/tangible/wearables/robe/shared_robe_s05.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "11818"
},
{
"name": "C",
"bytes": "7699"
},
{
"name": "C++",
"bytes": "2293610"
},
{
"name": "CMake",
"bytes": "39727"
},
{
"name": "PLSQL",
"bytes": "42065"
},
{
"name": "Python",
"bytes": "7499185"
},
{
"name": "SQLPL",
"bytes": "41864"
}
],
"symlink_target": ""
} |
from django.conf.urls import url
from .views import (
MovieCreateAPIView,
MovieDeleteAPIView,
MovieDetailAPIView,
MovieListAPIView,
)
urlpatterns = [
url(r'^$', MovieListAPIView.as_view(), name='list'),
url(r'^create/$', MovieCreateAPIView.as_view(), name='create'),
url(r'^(?P<pk>\d+)/$', MovieDetailAPIView.as_view(), name='detail'),
url(r'^(?P<pk>\d+)/delete/$', MovieDeleteAPIView.as_view(), name='delete'),
]
| {
"content_hash": "cefe5015313fbd4bb3b0fbb85c433df3",
"timestamp": "",
"source": "github",
"line_count": 16,
"max_line_length": 79,
"avg_line_length": 28.25,
"alnum_prop": 0.6438053097345132,
"repo_name": "CyriusG/frontpage_backend",
"id": "b6ac2ab3de5f0f3ac46e3859c590a6fe8cee794f",
"size": "452",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "movie/api/urls.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "1865"
},
{
"name": "Python",
"bytes": "44688"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import
from datetime import date, datetime # noqa: F401
from typing import List, Dict # noqa: F401
from tapi_server.models.base_model_ import Model
from tapi_server.models.tapi_common_forwarding_direction import TapiCommonForwardingDirection # noqa: F401,E501
from tapi_server.models.tapi_common_global_class import TapiCommonGlobalClass # noqa: F401,E501
from tapi_server.models.tapi_common_layer_protocol_name import TapiCommonLayerProtocolName # noqa: F401,E501
from tapi_server.models.tapi_common_lifecycle_state import TapiCommonLifecycleState # noqa: F401,E501
from tapi_server.models.tapi_common_name_and_value import TapiCommonNameAndValue # noqa: F401,E501
from tapi_server.models.tapi_common_operational_state import TapiCommonOperationalState # noqa: F401,E501
from tapi_server.models.tapi_common_operational_state_pac import TapiCommonOperationalStatePac # noqa: F401,E501
from tapi_server.models.tapi_connectivity_connection_end_point_ref import TapiConnectivityConnectionEndPointRef # noqa: F401,E501
from tapi_server.models.tapi_connectivity_connection_ref import TapiConnectivityConnectionRef # noqa: F401,E501
from tapi_server.models.tapi_connectivity_route import TapiConnectivityRoute # noqa: F401,E501
from tapi_server.models.tapi_connectivity_switch_control import TapiConnectivitySwitchControl # noqa: F401,E501
from tapi_server.models.tapi_topology_link_ref import TapiTopologyLinkRef # noqa: F401,E501
from tapi_server import util
class TapiConnectivityConnection(Model):
"""NOTE: This class is auto generated by OpenAPI Generator (https://openapi-generator.tech).
Do not edit the class manually.
"""
def __init__(self, name=None, uuid=None, operational_state=None, lifecycle_state=None, supported_client_link=None, lower_connection=None, switch_control=None, route=None, layer_protocol_name=None, connection_end_point=None, direction=None): # noqa: E501
"""TapiConnectivityConnection - a model defined in OpenAPI
:param name: The name of this TapiConnectivityConnection. # noqa: E501
:type name: List[TapiCommonNameAndValue]
:param uuid: The uuid of this TapiConnectivityConnection. # noqa: E501
:type uuid: str
:param operational_state: The operational_state of this TapiConnectivityConnection. # noqa: E501
:type operational_state: TapiCommonOperationalState
:param lifecycle_state: The lifecycle_state of this TapiConnectivityConnection. # noqa: E501
:type lifecycle_state: TapiCommonLifecycleState
:param supported_client_link: The supported_client_link of this TapiConnectivityConnection. # noqa: E501
:type supported_client_link: List[TapiTopologyLinkRef]
:param lower_connection: The lower_connection of this TapiConnectivityConnection. # noqa: E501
:type lower_connection: List[TapiConnectivityConnectionRef]
:param switch_control: The switch_control of this TapiConnectivityConnection. # noqa: E501
:type switch_control: List[TapiConnectivitySwitchControl]
:param route: The route of this TapiConnectivityConnection. # noqa: E501
:type route: List[TapiConnectivityRoute]
:param layer_protocol_name: The layer_protocol_name of this TapiConnectivityConnection. # noqa: E501
:type layer_protocol_name: TapiCommonLayerProtocolName
:param connection_end_point: The connection_end_point of this TapiConnectivityConnection. # noqa: E501
:type connection_end_point: List[TapiConnectivityConnectionEndPointRef]
:param direction: The direction of this TapiConnectivityConnection. # noqa: E501
:type direction: TapiCommonForwardingDirection
"""
self.openapi_types = {
'name': List[TapiCommonNameAndValue],
'uuid': str,
'operational_state': TapiCommonOperationalState,
'lifecycle_state': TapiCommonLifecycleState,
'supported_client_link': List[TapiTopologyLinkRef],
'lower_connection': List[TapiConnectivityConnectionRef],
'switch_control': List[TapiConnectivitySwitchControl],
'route': List[TapiConnectivityRoute],
'layer_protocol_name': TapiCommonLayerProtocolName,
'connection_end_point': List[TapiConnectivityConnectionEndPointRef],
'direction': TapiCommonForwardingDirection
}
self.attribute_map = {
'name': 'name',
'uuid': 'uuid',
'operational_state': 'operational-state',
'lifecycle_state': 'lifecycle-state',
'supported_client_link': 'supported-client-link',
'lower_connection': 'lower-connection',
'switch_control': 'switch-control',
'route': 'route',
'layer_protocol_name': 'layer-protocol-name',
'connection_end_point': 'connection-end-point',
'direction': 'direction'
}
self._name = name
self._uuid = uuid
self._operational_state = operational_state
self._lifecycle_state = lifecycle_state
self._supported_client_link = supported_client_link
self._lower_connection = lower_connection
self._switch_control = switch_control
self._route = route
self._layer_protocol_name = layer_protocol_name
self._connection_end_point = connection_end_point
self._direction = direction
@classmethod
def from_dict(cls, dikt) -> 'TapiConnectivityConnection':
"""Returns the dict as a model
:param dikt: A dict.
:type: dict
:return: The tapi.connectivity.Connection of this TapiConnectivityConnection. # noqa: E501
:rtype: TapiConnectivityConnection
"""
return util.deserialize_model(dikt, cls)
@property
def name(self):
"""Gets the name of this TapiConnectivityConnection.
List of names. A property of an entity with a value that is unique in some namespace but may change during the life of the entity. A name carries no semantics with respect to the purpose of the entity. # noqa: E501
:return: The name of this TapiConnectivityConnection.
:rtype: List[TapiCommonNameAndValue]
"""
return self._name
@name.setter
def name(self, name):
"""Sets the name of this TapiConnectivityConnection.
List of names. A property of an entity with a value that is unique in some namespace but may change during the life of the entity. A name carries no semantics with respect to the purpose of the entity. # noqa: E501
:param name: The name of this TapiConnectivityConnection.
:type name: List[TapiCommonNameAndValue]
"""
self._name = name
@property
def uuid(self):
"""Gets the uuid of this TapiConnectivityConnection.
UUID: An identifier that is universally unique within an identifier space, where the identifier space is itself globally unique, and immutable. An UUID carries no semantics with respect to the purpose or state of the entity. UUID here uses string representation as defined in RFC 4122. The canonical representation uses lowercase characters. Pattern: [0-9a-fA-F]{8}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-' + '[0-9a-fA-F]{4}-[0-9a-fA-F]{12} Example of a UUID in string representation: f81d4fae-7dec-11d0-a765-00a0c91e6bf6 # noqa: E501
:return: The uuid of this TapiConnectivityConnection.
:rtype: str
"""
return self._uuid
@uuid.setter
def uuid(self, uuid):
"""Sets the uuid of this TapiConnectivityConnection.
UUID: An identifier that is universally unique within an identifier space, where the identifier space is itself globally unique, and immutable. An UUID carries no semantics with respect to the purpose or state of the entity. UUID here uses string representation as defined in RFC 4122. The canonical representation uses lowercase characters. Pattern: [0-9a-fA-F]{8}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-' + '[0-9a-fA-F]{4}-[0-9a-fA-F]{12} Example of a UUID in string representation: f81d4fae-7dec-11d0-a765-00a0c91e6bf6 # noqa: E501
:param uuid: The uuid of this TapiConnectivityConnection.
:type uuid: str
"""
self._uuid = uuid
@property
def operational_state(self):
"""Gets the operational_state of this TapiConnectivityConnection.
:return: The operational_state of this TapiConnectivityConnection.
:rtype: TapiCommonOperationalState
"""
return self._operational_state
@operational_state.setter
def operational_state(self, operational_state):
"""Sets the operational_state of this TapiConnectivityConnection.
:param operational_state: The operational_state of this TapiConnectivityConnection.
:type operational_state: TapiCommonOperationalState
"""
self._operational_state = operational_state
@property
def lifecycle_state(self):
"""Gets the lifecycle_state of this TapiConnectivityConnection.
:return: The lifecycle_state of this TapiConnectivityConnection.
:rtype: TapiCommonLifecycleState
"""
return self._lifecycle_state
@lifecycle_state.setter
def lifecycle_state(self, lifecycle_state):
"""Sets the lifecycle_state of this TapiConnectivityConnection.
:param lifecycle_state: The lifecycle_state of this TapiConnectivityConnection.
:type lifecycle_state: TapiCommonLifecycleState
"""
self._lifecycle_state = lifecycle_state
@property
def supported_client_link(self):
"""Gets the supported_client_link of this TapiConnectivityConnection.
none # noqa: E501
:return: The supported_client_link of this TapiConnectivityConnection.
:rtype: List[TapiTopologyLinkRef]
"""
return self._supported_client_link
@supported_client_link.setter
def supported_client_link(self, supported_client_link):
"""Sets the supported_client_link of this TapiConnectivityConnection.
none # noqa: E501
:param supported_client_link: The supported_client_link of this TapiConnectivityConnection.
:type supported_client_link: List[TapiTopologyLinkRef]
"""
self._supported_client_link = supported_client_link
@property
def lower_connection(self):
"""Gets the lower_connection of this TapiConnectivityConnection.
An Connection object supports a recursive aggregation relationship such that the internal construction of an Connection can be exposed as multiple lower level Connection objects (partitioning). Aggregation is used as for the Node/Topology to allow changes in hierarchy. Connection aggregation reflects Node/Topology aggregation. The FC represents a Cross-Connection in an NE. The Cross-Connection in an NE is not necessarily the lowest level of FC partitioning. # noqa: E501
:return: The lower_connection of this TapiConnectivityConnection.
:rtype: List[TapiConnectivityConnectionRef]
"""
return self._lower_connection
@lower_connection.setter
def lower_connection(self, lower_connection):
"""Sets the lower_connection of this TapiConnectivityConnection.
An Connection object supports a recursive aggregation relationship such that the internal construction of an Connection can be exposed as multiple lower level Connection objects (partitioning). Aggregation is used as for the Node/Topology to allow changes in hierarchy. Connection aggregation reflects Node/Topology aggregation. The FC represents a Cross-Connection in an NE. The Cross-Connection in an NE is not necessarily the lowest level of FC partitioning. # noqa: E501
:param lower_connection: The lower_connection of this TapiConnectivityConnection.
:type lower_connection: List[TapiConnectivityConnectionRef]
"""
self._lower_connection = lower_connection
@property
def switch_control(self):
"""Gets the switch_control of this TapiConnectivityConnection.
none # noqa: E501
:return: The switch_control of this TapiConnectivityConnection.
:rtype: List[TapiConnectivitySwitchControl]
"""
return self._switch_control
@switch_control.setter
def switch_control(self, switch_control):
"""Sets the switch_control of this TapiConnectivityConnection.
none # noqa: E501
:param switch_control: The switch_control of this TapiConnectivityConnection.
:type switch_control: List[TapiConnectivitySwitchControl]
"""
self._switch_control = switch_control
@property
def route(self):
"""Gets the route of this TapiConnectivityConnection.
none # noqa: E501
:return: The route of this TapiConnectivityConnection.
:rtype: List[TapiConnectivityRoute]
"""
return self._route
@route.setter
def route(self, route):
"""Sets the route of this TapiConnectivityConnection.
none # noqa: E501
:param route: The route of this TapiConnectivityConnection.
:type route: List[TapiConnectivityRoute]
"""
self._route = route
@property
def layer_protocol_name(self):
"""Gets the layer_protocol_name of this TapiConnectivityConnection.
:return: The layer_protocol_name of this TapiConnectivityConnection.
:rtype: TapiCommonLayerProtocolName
"""
return self._layer_protocol_name
@layer_protocol_name.setter
def layer_protocol_name(self, layer_protocol_name):
"""Sets the layer_protocol_name of this TapiConnectivityConnection.
:param layer_protocol_name: The layer_protocol_name of this TapiConnectivityConnection.
:type layer_protocol_name: TapiCommonLayerProtocolName
"""
self._layer_protocol_name = layer_protocol_name
@property
def connection_end_point(self):
"""Gets the connection_end_point of this TapiConnectivityConnection.
none # noqa: E501
:return: The connection_end_point of this TapiConnectivityConnection.
:rtype: List[TapiConnectivityConnectionEndPointRef]
"""
return self._connection_end_point
@connection_end_point.setter
def connection_end_point(self, connection_end_point):
"""Sets the connection_end_point of this TapiConnectivityConnection.
none # noqa: E501
:param connection_end_point: The connection_end_point of this TapiConnectivityConnection.
:type connection_end_point: List[TapiConnectivityConnectionEndPointRef]
"""
self._connection_end_point = connection_end_point
@property
def direction(self):
"""Gets the direction of this TapiConnectivityConnection.
:return: The direction of this TapiConnectivityConnection.
:rtype: TapiCommonForwardingDirection
"""
return self._direction
@direction.setter
def direction(self, direction):
"""Sets the direction of this TapiConnectivityConnection.
:param direction: The direction of this TapiConnectivityConnection.
:type direction: TapiCommonForwardingDirection
"""
self._direction = direction
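# Minimal construction sketch (not part of the generated module; values are
# illustrative). The constructor above simply assigns through the property
# setters, so a partially populated connection can be built directly:
if __name__ == '__main__':
    connection = TapiConnectivityConnection(
        uuid='f81d4fae-7dec-11d0-a765-00a0c91e6bf6',
        route=[],
    )
    connection.lower_connection = []
    print(connection.uuid, connection.openapi_types['direction'])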
| {
"content_hash": "07cc665b04130f805226ba04eeabc6ed",
"timestamp": "",
"source": "github",
"line_count": 348,
"max_line_length": 599,
"avg_line_length": 44.91379310344828,
"alnum_prop": 0.6920665387076136,
"repo_name": "OpenNetworkingFoundation/ONFOpenTransport",
"id": "8a2fdef5b4be53d206ba7891ceddcf78e339fd7e",
"size": "15647",
"binary": false,
"copies": "4",
"ref": "refs/heads/develop",
"path": "RI/flask_server/tapi_server/models/tapi_connectivity_connection.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "D",
"bytes": "2562"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
import mock
from mopidy_emby import library, playback
from mopidy_emby.backend import EmbyBackend
@mock.patch('mopidy_emby.backend.EmbyHandler', autospec=True)
def test_backend(embyhandler_mock, config):
backend = EmbyBackend(config, mock.Mock())
assert backend.uri_schemes == ['emby']
assert isinstance(backend.library, library.EmbyLibraryProvider)
assert isinstance(backend.playback, playback.EmbyPlaybackProvider)
assert backend.playlist is None
| {
"content_hash": "076eb5bdcaf1395c72515fdd86a4e844",
"timestamp": "",
"source": "github",
"line_count": 17,
"max_line_length": 70,
"avg_line_length": 30.235294117647058,
"alnum_prop": 0.77431906614786,
"repo_name": "xsteadfastx/mopidy-emby",
"id": "892dbc54f22c72c38780afff397a21d61dcdbc48",
"size": "514",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_backend.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Makefile",
"bytes": "398"
},
{
"name": "Python",
"bytes": "45144"
}
],
"symlink_target": ""
} |
"""Deleting resource tag(s)"""
from baseCmd import *
from baseResponse import *
class deleteTagsCmd (baseCmd):
typeInfo = {}
def __init__(self):
self.isAsync = "true"
"""Delete tags for resource id(s)"""
"""Required"""
self.resourceids = []
self.typeInfo['resourceids'] = 'list'
"""Delete tag by resource type"""
"""Required"""
self.resourcetype = None
self.typeInfo['resourcetype'] = 'string'
"""Delete tags matching key/value pairs"""
self.tags = []
self.typeInfo['tags'] = 'map'
self.required = ["resourceids", "resourcetype", ]
class deleteTagsResponse (baseResponse):
typeInfo = {}
def __init__(self):
"""any text associated with the success or failure"""
self.displaytext = None
self.typeInfo['displaytext'] = 'string'
"""true if operation is executed successfully"""
self.success = None
self.typeInfo['success'] = 'boolean'
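# Hypothetical usage sketch (not part of the generated module; values are
# illustrative). A test would normally build the command like this and then
# pass it to the Marvin CloudStack API client, which is outside this excerpt:
if __name__ == '__main__':
    cmd = deleteTagsCmd()
    cmd.resourceids = ['1f1e9c54-0d2e-4c11-a1b0-000000000000']
    cmd.resourcetype = 'UserVm'
    cmd.tags = [{'key': 'env', 'value': 'staging'}]
    print(cmd.required)  # -> ['resourceids', 'resourcetype']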
| {
"content_hash": "f634f944bbfd449e31542ff5b68b3be1",
"timestamp": "",
"source": "github",
"line_count": 35,
"max_line_length": 61,
"avg_line_length": 28.742857142857144,
"alnum_prop": 0.5825049701789264,
"repo_name": "MissionCriticalCloud/marvin",
"id": "c87e73d5f66d8785b12375be71e4265dfb34892d",
"size": "1006",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "marvin/cloudstackAPI/deleteTags.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "2573421"
}
],
"symlink_target": ""
} |
from collections import defaultdict
from itertools import starmap
from telemetry.core import util
from telemetry.page import legacy_page_test
from telemetry.value import scalar
from measurements import timeline_controller
import py_utils
class BlinkStyle(legacy_page_test.LegacyPageTest):
def __init__(self):
super(BlinkStyle, self).__init__()
self._controller = None
def WillNavigateToPage(self, page, tab):
self._controller = timeline_controller.TimelineController()
self._controller.trace_categories = 'blink_style,blink.console'
self._controller.SetUp(page, tab)
self._controller.Start(tab)
def DidRunPage(self, platform):
if self._controller:
self._controller.CleanUp(platform)
def ValidateAndMeasurePage(self, page, tab, results):
with tab.action_runner.CreateInteraction('wait-for-quiescence'):
tab.ExecuteJavaScript('console.time("");')
try:
util.WaitFor(tab.HasReachedQuiescence, 15)
except py_utils.TimeoutException:
        # Some sites never reach quiescence. As this benchmark normalizes/
        # categorizes results, it shouldn't be necessary to reach the same
# state on every run.
pass
tab.ExecuteJavaScript('''
for (var i = 0; i < 11; i++) {
var cold = i % 2 == 0;
var name = "update_style";
if (cold) name += "_cold";
console.time(name);
// Occasionally documents will break the APIs we need
try {
// On cold runs, force a new StyleResolver
if (cold) {
var style = document.createElement("style");
document.head.appendChild(style);
style.remove();
}
// Invalidate style for the whole document
document.documentElement.lang += "z";
// Force a style update (but not layout)
getComputedStyle(document.documentElement).color;
} catch (e) {}
console.timeEnd(name);
}''')
self._controller.Stop(tab, results)
renderer = self._controller.model.GetRendererThreadFromTabId(tab.id)
markers = [event for event in renderer.async_slices
if event.name.startswith('update_style')
and event.category == 'blink.console']
# Drop the first run.
markers = markers[1:]
assert len(markers) == 10
def duration(event):
if event.has_thread_timestamps:
return event.thread_duration
else:
return event.duration
for marker in markers:
for event in renderer.all_slices:
if (event.name == 'Document::updateStyle'
and event.start >= marker.start
and event.end <= marker.end):
access_count = event.args.get('resolverAccessCount')
if access_count is None:
# absent in earlier versions
continue
min_access_count = 50
if access_count >= min_access_count:
result = 1000 * (duration(event) / access_count)
results.AddValue(scalar.ScalarValue(
page, marker.name, 'ms/1000 elements', result))
class ParserEvent(object):
def __init__(self, summary_event, tokenize_event, parse_event):
min_sheet_length = 1000
ua_sheet_mode = 5
enormous_token_threshold = 100
large_token_threshold = 5
self.mode = summary_event.args.get('mode')
self.length = summary_event.args.get('length')
self.tokens = summary_event.args.get('tokenCount')
self.tokenize_duration = duration(tokenize_event)
self.parse_duration = duration(parse_event)
self.chars_per_token = 0
if self.tokens:
self.chars_per_token = self.length / float(self.tokens)
if self.mode == ua_sheet_mode or self.length < min_sheet_length:
self.category = 'ignored'
elif self.chars_per_token > enormous_token_threshold:
self.category = 'enormous_tokens'
elif self.chars_per_token > large_token_threshold:
self.category = 'large_tokens'
else:
self.category = 'regular'
parser_events = [event for event in renderer.all_slices
if event.name == 'CSSParserImpl::parseStyleSheet'
or event.name == 'CSSParserImpl::parseStyleSheet.tokenize'
or event.name == 'CSSParserImpl::parseStyleSheet.parse']
merged_events = starmap(ParserEvent, zip(*[iter(parser_events)] * 3))
events_by_category = defaultdict(list)
for event in merged_events:
if event.category != 'ignored':
events_by_category[event.category].append(event)
for category, events in events_by_category.items():
parse_duration = sum(event.parse_duration for event in events)
tokenize_duration = sum(event.tokenize_duration for event in events)
tokens = sum(event.tokens for event in events)
length = sum(event.length for event in events)
results.AddValue(
scalar.ScalarValue(page, ('parse_css_%s' % category),
'tokens/s', 1000 / (parse_duration / tokens)))
results.AddValue(
scalar.ScalarValue(page, ('tokenize_css_%s' % category),
'char/s', 1000 / (tokenize_duration / length)))
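# Metrics emitted above (descriptive summary, not part of the original file):
# - update_style / update_style_cold: Document::updateStyle time in
#   ms per 1000 style-resolver accesses, for warm and cold StyleResolvers.
# - parse_css_<category>: CSS parsing throughput in tokens per second.
# - tokenize_css_<category>: CSS tokenizing throughput in characters per
#   second, with sheets bucketed as regular / large_tokens / enormous_tokens.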
| {
"content_hash": "a0f1a1b906ecd2425c7cdb9c3f321af9",
"timestamp": "",
"source": "github",
"line_count": 140,
"max_line_length": 79,
"avg_line_length": 37.892857142857146,
"alnum_prop": 0.6216776625824694,
"repo_name": "Samsung/ChromiumGStreamerBackend",
"id": "dae97c74cf8044df1892dda356579e3c9605d728",
"size": "5468",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "tools/perf/measurements/blink_style.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [],
"symlink_target": ""
} |
"""
Parser for N42 ascii files
--------------------------
File format description:
https://www.nist.gov/programs-projects/ansiieee-n4242-standard
"""
import numpy as np
def parse_n42(fname, header_length=36, footer_length=4, to_energy=True):
"""Parse a N42 file
Parameters
----------
- fname : str
input file name
    - header_length : int
      number of header lines before the data block [36]
    - footer_length : int
      number of footer lines after the data block [4]
- to_energy : bool
convert to energy (eV) [True]
"""
def _getfloat(value):
return float(value.replace(',', '.').split(' ')[0])
#: header dict with metadata
hdict = {}
with open(fname) as f:
lines = f.read().splitlines()
# header = [line for line in lines[:header_length]]
# footer = [line for line in lines[-footer_length:]]
ydata = np.array([int(line) for line in lines[header_length:-footer_length]])
xdata = np.array(range(len(ydata)))
#: energy conversion coefficients
ene_calib = lines[17].split('\t')[-1].split(' ')
ene_calib = [_getfloat(coeff) for coeff in ene_calib]
hdict['ene_calib'] = ene_calib
#: convert to energy
if to_energy:
xdata = xdata * ene_calib[1] * 1000
return xdata, ydata, hdict
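# Self-contained check of the parser above (a sketch, not part of the original
# module). The header layout, in particular the calibration entry on line 18
# (index 17), is an assumption inferred from the parsing code, not from the
# N42 standard itself.
if __name__ == '__main__':
    import os
    import tempfile
    header = ['header line %d' % i for i in range(36)]
    header[17] = 'Energy calibration:\t0,0 0,5 0,0'
    counts = [str(i * 10) for i in range(8)]
    footer = ['footer'] * 4
    with tempfile.NamedTemporaryFile('w', suffix='.n42', delete=False) as f:
        f.write('\n'.join(header + counts + footer))
        tmp_name = f.name
    try:
        xdata, ydata, hdict = parse_n42(tmp_name)
        print(hdict['ene_calib'])    # -> [0.0, 0.5, 0.0]
        print(xdata[:3], ydata[:3])  # channels scaled to eV, raw counts
    finally:
        os.remove(tmp_name)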
| {
"content_hash": "d27e71aa23b10741df7d3fddabd004d1",
"timestamp": "",
"source": "github",
"line_count": 49,
"max_line_length": 81,
"avg_line_length": 26.6734693877551,
"alnum_prop": 0.5990818668706962,
"repo_name": "maurov/xraysloth",
"id": "f8410944a05b90d26eedf65ef7b99adcde34e148",
"size": "1353",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "sloth/io/n42.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "IDL",
"bytes": "882"
},
{
"name": "Jupyter Notebook",
"bytes": "328173"
},
{
"name": "Python",
"bytes": "791348"
},
{
"name": "Shell",
"bytes": "3536"
}
],
"symlink_target": ""
} |
import doctest
if __name__ == '__main__':
    doctest.testfile('doctest_in_help.txt')
| {
"content_hash": "297c964683fd776e668839d91c13d832",
"timestamp": "",
"source": "github",
"line_count": 4,
"max_line_length": 43,
"avg_line_length": 21.5,
"alnum_prop": 0.627906976744186,
"repo_name": "gaufung/PythonStandardLibrary",
"id": "abb05301edd04f2073f9e80d8709d0ad64a17033",
"size": "86",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "DevelopTool/doctest/doctest_testfile.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "3175176"
},
{
"name": "Python",
"bytes": "70796"
}
],
"symlink_target": ""
} |